#include <linux/ceph/ceph_debug.h>

#include <linux/crc32c.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/dns_resolver.h>
#include <net/tcp.h>

#include <linux/ceph/libceph.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/export.h>

/*
 * Ceph uses the messenger to exchange ceph_msg messages with other
 * hosts in the system.  The messenger provides ordered and reliable
 * delivery.  We tolerate TCP disconnects by reconnecting (with
 * exponential backoff) in the case of a fault (disconnection, bad
 * crc, protocol error).  Acks allow sent messages to be discarded by
 * the sender.
 */

/*
 * We track the state of the socket on a given connection using
 * values defined below.  The transition to a new socket state is
 * handled by a function which verifies we aren't coming from an
 * unexpected state.
 *
 *      --------
 *      | NEW* |  transient initial state
 *      --------
 *          | con_sock_state_init()
 *          v
 *      ----------
 *      | CLOSED |  initialized, but no socket (and no
 *      ----------  TCP connection)
 *       ^      \
 *       |       \ con_sock_state_connecting()
 *       |        ----------------------
 *       |                              \
 *       + con_sock_state_closed()       \
 *       |+---------------------------    \
 *       | \                          \    \
 *       |  -----------                \    \
 *       |  | CLOSING |  socket event;  \    \
 *       |  -----------  await close     \    \
 *       |       ^                        \   |
 *       |       |                         \  |
 *       |       + con_sock_state_closing() \ |
 *       |      / \                          | |
 *       |     /   ---------------           | |
 *       |    /                   \          v v
 *       |   /                    --------------
 *       |  /    -----------------| CONNECTING |  socket created, TCP
 *       |  |   /                 --------------  connect initiated
 *       |  |   | con_sock_state_connected()
 *       |  |   v
 *      -------------
 *      | CONNECTED |  TCP connection established
 *      -------------
 *
 * State values for ceph_connection->sock_state; NEW is assumed to be 0.
 */

#define CON_SOCK_STATE_NEW		0	/* -> CLOSED */
#define CON_SOCK_STATE_CLOSED		1	/* -> CONNECTING */
#define CON_SOCK_STATE_CONNECTING	2	/* -> CONNECTED or -> CLOSING */
#define CON_SOCK_STATE_CONNECTED	3	/* -> CLOSING or -> CLOSED */
#define CON_SOCK_STATE_CLOSING		4	/* -> CLOSED */

/*
 * connection states
 */
#define CON_STATE_CLOSED	1  /* -> PREOPEN */
#define CON_STATE_PREOPEN	2  /* -> CONNECTING, CLOSED */
#define CON_STATE_CONNECTING	3  /* -> NEGOTIATING, CLOSED */
#define CON_STATE_NEGOTIATING	4  /* -> OPEN, CLOSED */
#define CON_STATE_OPEN		5  /* -> STANDBY, CLOSED */
#define CON_STATE_STANDBY	6  /* -> PREOPEN, CLOSED */

/*
 * ceph_connection flag bits
 */
#define CON_FLAG_LOSSYTX	   0  /* we can close channel or drop
				       * messages on errors */
#define CON_FLAG_KEEPALIVE_PENDING 1  /* we need to send a keepalive */
#define CON_FLAG_WRITE_PENDING	   2  /* we have data ready to send */
#define CON_FLAG_SOCK_CLOSED	   3  /* socket state changed to closed */
#define CON_FLAG_BACKOFF	   4  /* need to retry queuing delayed work */

/* static tag bytes (protocol control messages) */
static char tag_msg = CEPH_MSGR_TAG_MSG;
static char tag_ack = CEPH_MSGR_TAG_ACK;
static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;

#ifdef CONFIG_LOCKDEP
static struct lock_class_key socket_class;
#endif

/*
 * When skipping (ignoring) a block of input we read it into a "skip
 * buffer," which is this many bytes in size.
 */
#define SKIP_BUF_SIZE	1024

static void queue_con(struct ceph_connection *con);
static void con_work(struct work_struct *);
static void ceph_fault(struct ceph_connection *con);

/*
 * Nicely render a sockaddr as a string.  An array of formatted
 * strings is used, to approximate reentrancy.
 */
#define ADDR_STR_COUNT_LOG	5	/* log2(# address strings in array) */
#define ADDR_STR_COUNT		(1 << ADDR_STR_COUNT_LOG)
#define ADDR_STR_COUNT_MASK	(ADDR_STR_COUNT - 1)
#define MAX_ADDR_STR_LEN	64	/* 54 is enough */

static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
static atomic_t addr_str_seq = ATOMIC_INIT(0);

static struct page *zero_page;		/* used in certain error cases */

const char *ceph_pr_addr(const struct sockaddr_storage *ss)
{
	int i;
	char *s;
	struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
	struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;

	i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
	s = addr_str[i];

	switch (ss->ss_family) {
	case AF_INET:
		snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%hu", &in4->sin_addr,
			 ntohs(in4->sin_port));
		break;

	case AF_INET6:
		snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%hu", &in6->sin6_addr,
			 ntohs(in6->sin6_port));
		break;

	default:
		snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %hu)",
			 ss->ss_family);
	}

	return s;
}
EXPORT_SYMBOL(ceph_pr_addr);

static void encode_my_addr(struct ceph_messenger *msgr)
{
	memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
	ceph_encode_addr(&msgr->my_enc_addr);
}
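/*
 * Illustrative only: ceph_pr_addr() is typically used to format a
 * peer address directly inside a log statement, e.g.:
 *
 *	struct sockaddr_storage *ss = &con->peer_addr.in_addr;
 *
 *	dout("peer is %s\n", ceph_pr_addr(ss));
 *
 * The rotating addr_str[] array means up to ADDR_STR_COUNT (32)
 * results may be live at once, so the returned pointer is only
 * suitable for short-lived uses such as printing.
 */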
/*
 * work queue for all reading and writing to/from the socket.
 */
static struct workqueue_struct *ceph_msgr_wq;

void _ceph_msgr_exit(void)
{
	if (ceph_msgr_wq) {
		destroy_workqueue(ceph_msgr_wq);
		ceph_msgr_wq = NULL;
	}

	BUG_ON(zero_page == NULL);
	page_cache_release(zero_page);
	zero_page = NULL;
}

int ceph_msgr_init(void)
{
	BUG_ON(zero_page != NULL);
	zero_page = ZERO_PAGE(0);
	page_cache_get(zero_page);

	ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_NON_REENTRANT, 0);
	if (ceph_msgr_wq)
		return 0;

	pr_err("msgr_init failed to create workqueue\n");
	_ceph_msgr_exit();

	return -ENOMEM;
}
EXPORT_SYMBOL(ceph_msgr_init);

void ceph_msgr_exit(void)
{
	BUG_ON(ceph_msgr_wq == NULL);

	_ceph_msgr_exit();
}
EXPORT_SYMBOL(ceph_msgr_exit);

void ceph_msgr_flush(void)
{
	flush_workqueue(ceph_msgr_wq);
}
EXPORT_SYMBOL(ceph_msgr_flush);

/* Connection socket state transition functions */

static void con_sock_state_init(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
	if (WARN_ON(old_state != CON_SOCK_STATE_NEW))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSED);
}

static void con_sock_state_connecting(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING);
	if (WARN_ON(old_state != CON_SOCK_STATE_CLOSED))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CONNECTING);
}

static void con_sock_state_connected(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CONNECTED);
}

static void con_sock_state_closing(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING &&
		    old_state != CON_SOCK_STATE_CONNECTED &&
		    old_state != CON_SOCK_STATE_CLOSING))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSING);
}

static void con_sock_state_closed(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED &&
		    old_state != CON_SOCK_STATE_CLOSING &&
		    old_state != CON_SOCK_STATE_CONNECTING &&
		    old_state != CON_SOCK_STATE_CLOSED))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSED);
}
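/*
 * Illustrative only: over the life of a healthy connection the
 * sock_state transitions recorded by the helpers above follow the
 * diagram at the top of this file, e.g.:
 *
 *	con_sock_state_init(con);	   NEW        -> CLOSED
 *	con_sock_state_connecting(con);	   CLOSED     -> CONNECTING
 *	con_sock_state_connected(con);	   CONNECTING -> CONNECTED
 *	con_sock_state_closed(con);	   CONNECTED  -> CLOSED
 *
 * Any other sequence trips the WARN_ON()s above, which is how we
 * catch a transition from an unexpected state.
 */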
/*
 * socket callback functions
 */

/* data available on socket, or listen socket received a connect */
static void ceph_sock_data_ready(struct sock *sk, int count_unused)
{
	struct ceph_connection *con = sk->sk_user_data;

	if (atomic_read(&con->msgr->stopping))
		return;

	if (sk->sk_state != TCP_CLOSE_WAIT) {
		dout("%s on %p state = %lu, queueing work\n", __func__,
		     con, con->state);
		queue_con(con);
	}
}

/* socket has buffer space for writing */
static void ceph_sock_write_space(struct sock *sk)
{
	struct ceph_connection *con = sk->sk_user_data;

	/* only queue to workqueue if there is data we want to write,
	 * and there is sufficient space in the socket buffer to accept
	 * more data.  clear SOCK_NOSPACE so that ceph_sock_write_space()
	 * doesn't get called again until try_write() fills the socket
	 * buffer. See net/ipv4/tcp_input.c:tcp_check_space()
	 * and net/core/stream.c:sk_stream_write_space().
	 */
	if (test_bit(CON_FLAG_WRITE_PENDING, &con->flags)) {
		if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
			dout("%s %p queueing write work\n", __func__, con);
			clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			queue_con(con);
		}
	} else {
		dout("%s %p nothing to write\n", __func__, con);
	}
}

/* socket's state has changed */
static void ceph_sock_state_change(struct sock *sk)
{
	struct ceph_connection *con = sk->sk_user_data;

	dout("%s %p state = %lu sk_state = %u\n", __func__,
	     con, con->state, sk->sk_state);

	switch (sk->sk_state) {
	case TCP_CLOSE:
		dout("%s TCP_CLOSE\n", __func__);
		/* fall through */
	case TCP_CLOSE_WAIT:
		dout("%s TCP_CLOSE_WAIT\n", __func__);
		con_sock_state_closing(con);
		set_bit(CON_FLAG_SOCK_CLOSED, &con->flags);
		queue_con(con);
		break;
	case TCP_ESTABLISHED:
		dout("%s TCP_ESTABLISHED\n", __func__);
		con_sock_state_connected(con);
		queue_con(con);
		break;
	default:	/* Everything else is uninteresting */
		break;
	}
}

/*
 * set up socket callbacks
 */
static void set_sock_callbacks(struct socket *sock,
			       struct ceph_connection *con)
{
	struct sock *sk = sock->sk;

	sk->sk_user_data = con;
	sk->sk_data_ready = ceph_sock_data_ready;
	sk->sk_write_space = ceph_sock_write_space;
	sk->sk_state_change = ceph_sock_state_change;
}
/*
 * socket helpers
 */

/*
 * initiate connection to a remote socket.
 */
static int ceph_tcp_connect(struct ceph_connection *con)
{
	struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
	struct socket *sock;
	int ret;

	BUG_ON(con->sock);
	ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM,
			       IPPROTO_TCP, &sock);
	if (ret)
		return ret;
	sock->sk->sk_allocation = GFP_NOFS;

#ifdef CONFIG_LOCKDEP
	lockdep_set_class(&sock->sk->sk_lock, &socket_class);
#endif

	set_sock_callbacks(sock, con);

	dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));

	con_sock_state_connecting(con);
	ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
				 O_NONBLOCK);
	if (ret == -EINPROGRESS) {
		dout("connect %s EINPROGRESS sk_state = %u\n",
		     ceph_pr_addr(&con->peer_addr.in_addr),
		     sock->sk->sk_state);
	} else if (ret < 0) {
		pr_err("connect %s error %d\n",
		       ceph_pr_addr(&con->peer_addr.in_addr), ret);
		sock_release(sock);
		con->error_msg = "connect error";

		return ret;
	}
	con->sock = sock;
	return 0;
}

static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
{
	struct kvec iov = {buf, len};
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	int r;

	r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
	if (r == -EAGAIN)
		r = 0;
	return r;
}

/*
 * write something.  @more is true if caller will be sending more data
 * shortly.
 */
static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
			    size_t kvlen, size_t len, int more)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	int r;

	if (more)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;  /* superfluous, but what the hell */

	r = kernel_sendmsg(sock, &msg, iov, kvlen, len);
	if (r == -EAGAIN)
		r = 0;
	return r;
}

static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
			     int offset, size_t size, int more)
{
	int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);
	int ret;

	ret = kernel_sendpage(sock, page, offset, size, flags);
	if (ret == -EAGAIN)
		ret = 0;

	return ret;
}


/*
 * Shutdown/close the socket for the given connection.
 */
static int con_close_socket(struct ceph_connection *con)
{
	int rc = 0;

	dout("con_close_socket on %p sock %p\n", con, con->sock);
	if (con->sock) {
		rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
		sock_release(con->sock);
		con->sock = NULL;
	}

	/*
	 * Forcibly clear the SOCK_CLOSED flag.  It gets set
	 * independent of the connection mutex, and we could have
	 * received a socket close event before we had the chance to
	 * shut the socket down.
	 */
	clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags);

	con_sock_state_closed(con);
	return rc;
}
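/*
 * Illustrative only: both ceph_tcp_recvmsg() and ceph_tcp_sendmsg()
 * map -EAGAIN to 0, so callers can treat "made no progress" and
 * "hard error" uniformly, e.g.:
 *
 *	ret = ceph_tcp_recvmsg(con->sock, buf, len);
 *	if (ret <= 0)
 *		return ret;	   0: socket dry, retry later; <0: error
 *	con->in_base_pos += ret;   partial progress is fine
 *
 * The worker simply requeues itself and retries whenever a pass made
 * no progress.
 */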
/*
 * Reset a connection.  Discard all incoming and outgoing messages
 * and clear *_seq state.
 */
static void ceph_msg_remove(struct ceph_msg *msg)
{
	list_del_init(&msg->list_head);
	BUG_ON(msg->con == NULL);
	msg->con->ops->put(msg->con);
	msg->con = NULL;

	ceph_msg_put(msg);
}

static void ceph_msg_remove_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
							list_head);
		ceph_msg_remove(msg);
	}
}

static void reset_connection(struct ceph_connection *con)
{
	/* reset connection, out_queue, msg_ and connect_seq */
	/* discard existing out_queue and msg_seq */
	ceph_msg_remove_list(&con->out_queue);
	ceph_msg_remove_list(&con->out_sent);

	if (con->in_msg) {
		BUG_ON(con->in_msg->con != con);
		con->in_msg->con = NULL;
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
		con->ops->put(con);
	}

	con->connect_seq = 0;
	con->out_seq = 0;
	if (con->out_msg) {
		ceph_msg_put(con->out_msg);
		con->out_msg = NULL;
	}
	con->in_seq = 0;
	con->in_seq_acked = 0;
}

/*
 * mark a peer down.  drop any open connections.
 */
void ceph_con_close(struct ceph_connection *con)
{
	mutex_lock(&con->mutex);
	dout("con_close %p peer %s\n", con,
	     ceph_pr_addr(&con->peer_addr.in_addr));
	con->state = CON_STATE_CLOSED;

	clear_bit(CON_FLAG_LOSSYTX, &con->flags); /* so we retry next connect */
	clear_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags);
	clear_bit(CON_FLAG_WRITE_PENDING, &con->flags);
	clear_bit(CON_FLAG_BACKOFF, &con->flags);

	reset_connection(con);
	con->peer_global_seq = 0;
	cancel_delayed_work(&con->work);
	con_close_socket(con);
	mutex_unlock(&con->mutex);
}
EXPORT_SYMBOL(ceph_con_close);

/*
 * Reopen a closed connection, with a new peer address.
 */
void ceph_con_open(struct ceph_connection *con,
		   __u8 entity_type, __u64 entity_num,
		   struct ceph_entity_addr *addr)
{
	mutex_lock(&con->mutex);
	dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));

	BUG_ON(con->state != CON_STATE_CLOSED);
	con->state = CON_STATE_PREOPEN;

	con->peer_name.type = (__u8) entity_type;
	con->peer_name.num = cpu_to_le64(entity_num);

	memcpy(&con->peer_addr, addr, sizeof(*addr));
	con->delay = 0;      /* reset backoff memory */
	mutex_unlock(&con->mutex);
	queue_con(con);
}
EXPORT_SYMBOL(ceph_con_open);

/*
 * return true if this connection ever successfully opened
 */
bool ceph_con_opened(struct ceph_connection *con)
{
	return con->connect_seq > 0;
}

/*
 * initialize a new connection.
 */
void ceph_con_init(struct ceph_connection *con, void *private,
		   const struct ceph_connection_operations *ops,
		   struct ceph_messenger *msgr)
{
	dout("con_init %p\n", con);
	memset(con, 0, sizeof(*con));
	con->private = private;
	con->ops = ops;
	con->msgr = msgr;

	con_sock_state_init(con);

	mutex_init(&con->mutex);
	INIT_LIST_HEAD(&con->out_queue);
	INIT_LIST_HEAD(&con->out_sent);
	INIT_DELAYED_WORK(&con->work, con_work);

	con->state = CON_STATE_CLOSED;
}
EXPORT_SYMBOL(ceph_con_init);
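/*
 * Illustrative only: a messenger client (e.g. the mon or osd client)
 * embeds a ceph_connection and drives it with the three calls above;
 * "s", "ops" and "osd_num" below are placeholders for that client's
 * session structure, ceph_connection_operations and entity number:
 *
 *	ceph_con_init(&s->con, s, &ops, &client->msgr);
 *	ceph_con_open(&s->con, CEPH_ENTITY_TYPE_OSD, osd_num, &addr);
 *	...
 *	ceph_con_close(&s->con);
 *
 * ceph_con_open() may only be called on a CLOSED connection, and it
 * queues work immediately so the handshake starts asynchronously.
 */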
/*
 * We maintain a global counter to order connection attempts.  Get
 * a unique seq greater than @gt.
 */
static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
{
	u32 ret;

	spin_lock(&msgr->global_seq_lock);
	if (msgr->global_seq < gt)
		msgr->global_seq = gt;
	ret = ++msgr->global_seq;
	spin_unlock(&msgr->global_seq_lock);
	return ret;
}

static void con_out_kvec_reset(struct ceph_connection *con)
{
	con->out_kvec_left = 0;
	con->out_kvec_bytes = 0;
	con->out_kvec_cur = &con->out_kvec[0];
}

static void con_out_kvec_add(struct ceph_connection *con,
			     size_t size, void *data)
{
	int index;

	index = con->out_kvec_left;
	BUG_ON(index >= ARRAY_SIZE(con->out_kvec));

	con->out_kvec[index].iov_len = size;
	con->out_kvec[index].iov_base = data;
	con->out_kvec_left++;
	con->out_kvec_bytes += size;
}

#ifdef CONFIG_BLOCK
static void init_bio_iter(struct bio *bio, struct bio **iter, int *seg)
{
	if (!bio) {
		*iter = NULL;
		*seg = 0;
		return;
	}
	*iter = bio;
	*seg = bio->bi_idx;
}

static void iter_bio_next(struct bio **bio_iter, int *seg)
{
	if (*bio_iter == NULL)
		return;

	BUG_ON(*seg >= (*bio_iter)->bi_vcnt);

	(*seg)++;
	if (*seg == (*bio_iter)->bi_vcnt)
		init_bio_iter((*bio_iter)->bi_next, bio_iter, seg);
}
#endif

static void prepare_write_message_data(struct ceph_connection *con)
{
	struct ceph_msg *msg = con->out_msg;

	BUG_ON(!msg);
	BUG_ON(!msg->hdr.data_len);

	/* initialize page iterator */
	con->out_msg_pos.page = 0;
	if (msg->pages)
		con->out_msg_pos.page_pos = msg->page_alignment;
	else
		con->out_msg_pos.page_pos = 0;
#ifdef CONFIG_BLOCK
	if (msg->bio)
		init_bio_iter(msg->bio, &msg->bio_iter, &msg->bio_seg);
#endif
	con->out_msg_pos.data_pos = 0;
	con->out_msg_pos.did_page_crc = false;
	con->out_more = 1;  /* data + footer will follow */
}

/*
 * Prepare footer for currently outgoing message, and finish things
 * off.  Assumes out_kvec* are already valid.. we just add on to the end.
 */
static void prepare_write_message_footer(struct ceph_connection *con)
{
	struct ceph_msg *m = con->out_msg;
	int v = con->out_kvec_left;

	m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;

	dout("prepare_write_message_footer %p\n", con);
	con->out_kvec_is_msg = true;
	con->out_kvec[v].iov_base = &m->footer;
	con->out_kvec[v].iov_len = sizeof(m->footer);
	con->out_kvec_bytes += sizeof(m->footer);
	con->out_kvec_left++;
	con->out_more = m->more_to_follow;
	con->out_msg_done = true;
}
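/*
 * Illustrative only: outgoing control data is staged in con->out_kvec
 * and flushed by write_partial_kvec().  A typical sequence queues a
 * tag byte followed by its payload, as prepare_write_ack() does:
 *
 *	con_out_kvec_reset(con);
 *	con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
 *	con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
 *	con_out_kvec_add(con, sizeof (con->out_temp_ack),
 *			 &con->out_temp_ack);
 *
 * Only pointers are queued, so the buffers (here out_temp_ack) must
 * stay valid until the kvecs have been written to the socket.
 */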
/*
 * Prepare headers for the next outgoing message.
 */
static void prepare_write_message(struct ceph_connection *con)
{
	struct ceph_msg *m;
	u32 crc;

	con_out_kvec_reset(con);
	con->out_kvec_is_msg = true;
	con->out_msg_done = false;

	/* Sneak an ack in there first?  If we can get it into the same
	 * TCP packet that's a good thing. */
	if (con->in_seq > con->in_seq_acked) {
		con->in_seq_acked = con->in_seq;
		con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
		con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
		con_out_kvec_add(con, sizeof (con->out_temp_ack),
			&con->out_temp_ack);
	}

	BUG_ON(list_empty(&con->out_queue));
	m = list_first_entry(&con->out_queue, struct ceph_msg, list_head);
	con->out_msg = m;
	BUG_ON(m->con != con);

	/* put message on sent list */
	ceph_msg_get(m);
	list_move_tail(&m->list_head, &con->out_sent);

	/*
	 * only assign outgoing seq # if we haven't sent this message
	 * yet.  if it is requeued, resend with its original seq.
	 */
	if (m->needs_out_seq) {
		m->hdr.seq = cpu_to_le64(++con->out_seq);
		m->needs_out_seq = false;
	}
#ifdef CONFIG_BLOCK
	else
		m->bio_iter = NULL;
#endif

	dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n",
	     m, con->out_seq, le16_to_cpu(m->hdr.type),
	     le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
	     le32_to_cpu(m->hdr.data_len),
	     m->nr_pages);
	BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len);

	/* tag + hdr + front + middle */
	con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
	con_out_kvec_add(con, sizeof (m->hdr), &m->hdr);
	con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);

	if (m->middle)
		con_out_kvec_add(con, m->middle->vec.iov_len,
			m->middle->vec.iov_base);

	/* fill in crc (except data pages), footer */
	crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
	con->out_msg->hdr.crc = cpu_to_le32(crc);
	con->out_msg->footer.flags = 0;

	crc = crc32c(0, m->front.iov_base, m->front.iov_len);
	con->out_msg->footer.front_crc = cpu_to_le32(crc);
	if (m->middle) {
		crc = crc32c(0, m->middle->vec.iov_base,
				m->middle->vec.iov_len);
		con->out_msg->footer.middle_crc = cpu_to_le32(crc);
	} else
		con->out_msg->footer.middle_crc = 0;
	dout("%s front_crc %u middle_crc %u\n", __func__,
	     le32_to_cpu(con->out_msg->footer.front_crc),
	     le32_to_cpu(con->out_msg->footer.middle_crc));

	/* is there a data payload? */
	con->out_msg->footer.data_crc = 0;
	if (m->hdr.data_len)
		prepare_write_message_data(con);
	else
		/* no, queue up footer too and be done */
		prepare_write_message_footer(con);

	set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
}

/*
 * Prepare an ack.
 */
static void prepare_write_ack(struct ceph_connection *con)
{
	dout("prepare_write_ack %p %llu -> %llu\n", con,
	     con->in_seq_acked, con->in_seq);
	con->in_seq_acked = con->in_seq;

	con_out_kvec_reset(con);

	con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);

	con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
	con_out_kvec_add(con, sizeof (con->out_temp_ack),
			 &con->out_temp_ack);

	con->out_more = 1;  /* more will follow.. eventually.. */
	set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
}

/*
 * Prepare to write keepalive byte.
 */
static void prepare_write_keepalive(struct ceph_connection *con)
{
	dout("prepare_write_keepalive %p\n", con);
	con_out_kvec_reset(con);
	con_out_kvec_add(con, sizeof (tag_keepalive), &tag_keepalive);
	set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
}
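/*
 * Illustrative only: the bytes queued by prepare_write_message() give
 * each message the following on-wire layout (an ack may be prepended
 * in the same TCP segment):
 *
 *	[tag_msg:1] [ceph_msg_header] [front] [middle?] [data pages?]
 *	[ceph_msg_footer]
 *
 * Header, front and middle CRCs are filled in before sending; the
 * data CRC is accumulated page by page in write_partial_msg_pages()
 * and lands in the footer, which is queued last.
 */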
/*
 * Connection negotiation.
 */

static struct ceph_auth_handshake *get_connect_authorizer(struct ceph_connection *con,
						int *auth_proto)
{
	struct ceph_auth_handshake *auth;

	if (!con->ops->get_authorizer) {
		con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN;
		con->out_connect.authorizer_len = 0;
		return NULL;
	}

	/* Can't hold the mutex while getting authorizer */
	mutex_unlock(&con->mutex);
	auth = con->ops->get_authorizer(con, auth_proto, con->auth_retry);
	mutex_lock(&con->mutex);

	if (IS_ERR(auth))
		return auth;
	if (con->state != CON_STATE_NEGOTIATING)
		return ERR_PTR(-EAGAIN);

	con->auth_reply_buf = auth->authorizer_reply_buf;
	con->auth_reply_buf_len = auth->authorizer_reply_buf_len;
	return auth;
}

/*
 * We connected to a peer and are saying hello.
 */
static void prepare_write_banner(struct ceph_connection *con)
{
	con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER);
	con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr),
			 &con->msgr->my_enc_addr);

	con->out_more = 0;
	set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
}

static int prepare_write_connect(struct ceph_connection *con)
{
	unsigned int global_seq = get_global_seq(con->msgr, 0);
	int proto;
	int auth_proto;
	struct ceph_auth_handshake *auth;

	switch (con->peer_name.type) {
	case CEPH_ENTITY_TYPE_MON:
		proto = CEPH_MONC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_OSD:
		proto = CEPH_OSDC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_MDS:
		proto = CEPH_MDSC_PROTOCOL;
		break;
	default:
		BUG();
	}

	dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
	     con->connect_seq, global_seq, proto);

	con->out_connect.features = cpu_to_le64(con->msgr->supported_features);
	con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
	con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
	con->out_connect.global_seq = cpu_to_le32(global_seq);
	con->out_connect.protocol_version = cpu_to_le32(proto);
	con->out_connect.flags = 0;

	auth_proto = CEPH_AUTH_UNKNOWN;
	auth = get_connect_authorizer(con, &auth_proto);
	if (IS_ERR(auth))
		return PTR_ERR(auth);

	con->out_connect.authorizer_protocol = cpu_to_le32(auth_proto);
	con->out_connect.authorizer_len = auth ?
		cpu_to_le32(auth->authorizer_buf_len) : 0;

	con_out_kvec_add(con, sizeof (con->out_connect),
			 &con->out_connect);
	if (auth && auth->authorizer_buf_len)
		con_out_kvec_add(con, auth->authorizer_buf_len,
				 auth->authorizer_buf);

	con->out_more = 0;
	set_bit(CON_FLAG_WRITE_PENDING, &con->flags);

	return 0;
}
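/*
 * Illustrative only: the client side of the handshake prepared above
 * exchanges, in order:
 *
 *	-> banner + encoded client address
 *	<- banner + peer address + the address the peer sees for us
 *	-> ceph_msg_connect (+ authorizer)
 *	<- ceph_msg_connect_reply (+ authorizer reply)
 *
 * and loops on the connect/reply step for RETRY_*, RESETSESSION and
 * BADAUTHORIZER tags until TAG_READY (see process_connect()).
 */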
/*
 * write as much of pending kvecs to the socket as we can.
 *  1 -> done
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_kvec(struct ceph_connection *con)
{
	int ret;

	dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
	while (con->out_kvec_bytes > 0) {
		ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
				       con->out_kvec_left, con->out_kvec_bytes,
				       con->out_more);
		if (ret <= 0)
			goto out;
		con->out_kvec_bytes -= ret;
		if (con->out_kvec_bytes == 0)
			break;            /* done */

		/* account for full iov entries consumed */
		while (ret >= con->out_kvec_cur->iov_len) {
			BUG_ON(!con->out_kvec_left);
			ret -= con->out_kvec_cur->iov_len;
			con->out_kvec_cur++;
			con->out_kvec_left--;
		}
		/* and for a partially-consumed entry */
		if (ret) {
			con->out_kvec_cur->iov_len -= ret;
			con->out_kvec_cur->iov_base += ret;
		}
	}
	con->out_kvec_left = 0;
	con->out_kvec_is_msg = false;
	ret = 1;
out:
	dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
	     con->out_kvec_bytes, con->out_kvec_left, ret);
	return ret;  /* done! */
}

static void out_msg_pos_next(struct ceph_connection *con, struct page *page,
			     size_t len, size_t sent, bool in_trail)
{
	struct ceph_msg *msg = con->out_msg;

	BUG_ON(!msg);
	BUG_ON(!sent);

	con->out_msg_pos.data_pos += sent;
	con->out_msg_pos.page_pos += sent;
	if (sent < len)
		return;

	BUG_ON(sent != len);
	con->out_msg_pos.page_pos = 0;
	con->out_msg_pos.page++;
	con->out_msg_pos.did_page_crc = false;
	if (in_trail)
		list_move_tail(&page->lru,
			       &msg->trail->head);
	else if (msg->pagelist)
		list_move_tail(&page->lru,
			       &msg->pagelist->head);
#ifdef CONFIG_BLOCK
	else if (msg->bio)
		iter_bio_next(&msg->bio_iter, &msg->bio_seg);
#endif
}
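/*
 * Illustrative only: suppose out_kvec holds entries of 1, 9 and 40
 * bytes and the socket accepts 12.  write_partial_kvec() consumes the
 * 1- and 9-byte entries outright, then advances into the 40-byte one:
 *
 *	out_kvec_cur   += 2;	   two entries fully consumed
 *	iov_base       += 2;	   2 of the 40 bytes already sent
 *	iov_len        -= 2;
 *	out_kvec_bytes -= 12;	   38 bytes left overall
 *
 * so the next call resumes exactly where the socket stopped taking
 * data.
 */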
/*
 * Write as much message data payload as we can.  If we finish, queue
 * up the footer.
 *  1 -> done, footer is now queued in out_kvec[].
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_msg_pages(struct ceph_connection *con)
{
	struct ceph_msg *msg = con->out_msg;
	unsigned int data_len = le32_to_cpu(msg->hdr.data_len);
	size_t len;
	bool do_datacrc = !con->msgr->nocrc;
	int ret;
	int total_max_write;
	bool in_trail = false;
	const size_t trail_len = (msg->trail ? msg->trail->length : 0);
	const size_t trail_off = data_len - trail_len;

	dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n",
	     con, msg, con->out_msg_pos.page, msg->nr_pages,
	     con->out_msg_pos.page_pos);

	/*
	 * Iterate through each page that contains data to be
	 * written, and send as much as possible for each.
	 *
	 * If we are calculating the data crc (the default), we will
	 * need to map the page.  If we have no pages, they have
	 * been revoked, so use the zero page.
	 */
	while (data_len > con->out_msg_pos.data_pos) {
		struct page *page = NULL;
		int max_write = PAGE_SIZE;
		int bio_offset = 0;

		in_trail = in_trail || con->out_msg_pos.data_pos >= trail_off;
		if (!in_trail)
			total_max_write = trail_off - con->out_msg_pos.data_pos;

		if (in_trail) {
			total_max_write = data_len - con->out_msg_pos.data_pos;

			page = list_first_entry(&msg->trail->head,
						struct page, lru);
		} else if (msg->pages) {
			page = msg->pages[con->out_msg_pos.page];
		} else if (msg->pagelist) {
			page = list_first_entry(&msg->pagelist->head,
						struct page, lru);
#ifdef CONFIG_BLOCK
		} else if (msg->bio) {
			struct bio_vec *bv;

			bv = bio_iovec_idx(msg->bio_iter, msg->bio_seg);
			page = bv->bv_page;
			bio_offset = bv->bv_offset;
			max_write = bv->bv_len;
#endif
		} else {
			page = zero_page;
		}
		len = min_t(int, max_write - con->out_msg_pos.page_pos,
			    total_max_write);

		if (do_datacrc && !con->out_msg_pos.did_page_crc) {
			void *base;
			u32 crc = le32_to_cpu(msg->footer.data_crc);
			char *kaddr;

			kaddr = kmap(page);
			BUG_ON(kaddr == NULL);
			base = kaddr + con->out_msg_pos.page_pos + bio_offset;
			crc = crc32c(crc, base, len);
			msg->footer.data_crc = cpu_to_le32(crc);
			con->out_msg_pos.did_page_crc = true;
			kunmap(page);	/* balance the kmap() above */
		}
		ret = ceph_tcp_sendpage(con->sock, page,
				      con->out_msg_pos.page_pos + bio_offset,
				      len, 1);
		if (ret <= 0)
			goto out;

		out_msg_pos_next(con, page, len, (size_t) ret, in_trail);
	}

	dout("write_partial_msg_pages %p msg %p done\n", con, msg);

	/* prepare and queue up footer, too */
	if (!do_datacrc)
		msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
	con_out_kvec_reset(con);
	prepare_write_message_footer(con);
	ret = 1;
out:
	return ret;
}

/*
 * write some zeros
 */
static int write_partial_skip(struct ceph_connection *con)
{
	int ret;

	while (con->out_skip > 0) {
		size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE);

		ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, 1);
		if (ret <= 0)
			goto out;
		con->out_skip -= ret;
	}
	ret = 1;
out:
	return ret;
}

/*
 * Prepare to read connection handshake, or an ack.
 */
static void prepare_read_banner(struct ceph_connection *con)
{
	dout("prepare_read_banner %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_connect(struct ceph_connection *con)
{
	dout("prepare_read_connect %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_ack(struct ceph_connection *con)
{
	dout("prepare_read_ack %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_tag(struct ceph_connection *con)
{
	dout("prepare_read_tag %p\n", con);
	con->in_base_pos = 0;
	con->in_tag = CEPH_MSGR_TAG_READY;
}
/*
 * Prepare to read a message.
 */
static int prepare_read_message(struct ceph_connection *con)
{
	dout("prepare_read_message %p\n", con);
	BUG_ON(con->in_msg != NULL);
	con->in_base_pos = 0;
	con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
	return 0;
}


static int read_partial(struct ceph_connection *con,
			int end, int size, void *object)
{
	while (con->in_base_pos < end) {
		int left = end - con->in_base_pos;
		int have = size - left;
		int ret = ceph_tcp_recvmsg(con->sock, object + have, left);

		if (ret <= 0)
			return ret;
		con->in_base_pos += ret;
	}
	return 1;
}


/*
 * Read all or part of the connect-side handshake on a new connection
 */
static int read_partial_banner(struct ceph_connection *con)
{
	int size;
	int end;
	int ret;

	dout("read_partial_banner %p at %d\n", con, con->in_base_pos);

	/* peer's banner */
	size = strlen(CEPH_BANNER);
	end = size;
	ret = read_partial(con, end, size, con->in_banner);
	if (ret <= 0)
		goto out;

	size = sizeof (con->actual_peer_addr);
	end += size;
	ret = read_partial(con, end, size, &con->actual_peer_addr);
	if (ret <= 0)
		goto out;

	size = sizeof (con->peer_addr_for_me);
	end += size;
	ret = read_partial(con, end, size, &con->peer_addr_for_me);
	if (ret <= 0)
		goto out;

out:
	return ret;
}

static int read_partial_connect(struct ceph_connection *con)
{
	int size;
	int end;
	int ret;

	dout("read_partial_connect %p at %d\n", con, con->in_base_pos);

	size = sizeof (con->in_reply);
	end = size;
	ret = read_partial(con, end, size, &con->in_reply);
	if (ret <= 0)
		goto out;

	size = le32_to_cpu(con->in_reply.authorizer_len);
	end += size;
	ret = read_partial(con, end, size, con->auth_reply_buf);
	if (ret <= 0)
		goto out;

	dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
	     con, (int)con->in_reply.tag,
	     le32_to_cpu(con->in_reply.connect_seq),
	     le32_to_cpu(con->in_reply.global_seq));
out:
	return ret;
}
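/*
 * Illustrative only: read_partial() uses con->in_base_pos as a cursor
 * across *all* sections read so far, so callers chain sections by
 * advancing "end", as read_partial_banner() above does:
 *
 *	size = strlen(CEPH_BANNER);
 *	end = size;
 *	ret = read_partial(con, end, size, con->in_banner);
 *	...
 *	size = sizeof (con->actual_peer_addr);
 *	end += size;
 *	ret = read_partial(con, end, size, &con->actual_peer_addr);
 *
 * A return of 1 means the section is complete; 0 means the socket ran
 * dry and the same call must be repeated later.
 */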
/*
 * Verify the hello banner looks okay.
 */
static int verify_hello(struct ceph_connection *con)
{
	if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
		pr_err("connect to %s got bad banner\n",
		       ceph_pr_addr(&con->peer_addr.in_addr));
		con->error_msg = "protocol error, bad banner";
		return -1;
	}
	return 0;
}

static bool addr_is_blank(struct sockaddr_storage *ss)
{
	switch (ss->ss_family) {
	case AF_INET:
		return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0;
	case AF_INET6:
		return
		 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 &&
		 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 &&
		 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 &&
		 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0;
	}
	return false;
}

static int addr_port(struct sockaddr_storage *ss)
{
	switch (ss->ss_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)ss)->sin_port);
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)ss)->sin6_port);
	}
	return 0;
}

static void addr_set_port(struct sockaddr_storage *ss, int p)
{
	switch (ss->ss_family) {
	case AF_INET:
		((struct sockaddr_in *)ss)->sin_port = htons(p);
		break;
	case AF_INET6:
		((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
		break;
	}
}

/*
 * Unlike other *_pton function semantics, zero indicates success.
 */
static int ceph_pton(const char *str, size_t len, struct sockaddr_storage *ss,
		char delim, const char **ipend)
{
	struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
	struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;

	memset(ss, 0, sizeof(*ss));

	if (in4_pton(str, len, (u8 *)&in4->sin_addr.s_addr, delim, ipend)) {
		ss->ss_family = AF_INET;
		return 0;
	}

	if (in6_pton(str, len, (u8 *)&in6->sin6_addr.s6_addr, delim, ipend)) {
		ss->ss_family = AF_INET6;
		return 0;
	}

	return -EINVAL;
}
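/*
 * Illustrative only: ceph_pton() wraps in4_pton()/in6_pton() and
 * returns 0 on success (note the inverted convention).  With
 * buf = "10.0.0.1:6789", something like:
 *
 *	struct sockaddr_storage ss;
 *	const char *p;
 *
 *	if (ceph_pton(buf, strlen(buf), &ss, ':', &p) == 0)
 *		... parsing stopped at the delimiter, so ss holds the
 *		... AF_INET address and p points at the ':'
 *
 * The port is parsed separately by the caller (see ceph_parse_ips()).
 */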
"failed" : ceph_pr_addr(ss)); 1363 1364 return ret; 1365 } 1366 #else 1367 static inline int ceph_dns_resolve_name(const char *name, size_t namelen, 1368 struct sockaddr_storage *ss, char delim, const char **ipend) 1369 { 1370 return -EINVAL; 1371 } 1372 #endif 1373 1374 /* 1375 * Parse a server name (IP or hostname). If a valid IP address is not found 1376 * then try to extract a hostname to resolve using userspace DNS upcall. 1377 */ 1378 static int ceph_parse_server_name(const char *name, size_t namelen, 1379 struct sockaddr_storage *ss, char delim, const char **ipend) 1380 { 1381 int ret; 1382 1383 ret = ceph_pton(name, namelen, ss, delim, ipend); 1384 if (ret) 1385 ret = ceph_dns_resolve_name(name, namelen, ss, delim, ipend); 1386 1387 return ret; 1388 } 1389 1390 /* 1391 * Parse an ip[:port] list into an addr array. Use the default 1392 * monitor port if a port isn't specified. 1393 */ 1394 int ceph_parse_ips(const char *c, const char *end, 1395 struct ceph_entity_addr *addr, 1396 int max_count, int *count) 1397 { 1398 int i, ret = -EINVAL; 1399 const char *p = c; 1400 1401 dout("parse_ips on '%.*s'\n", (int)(end-c), c); 1402 for (i = 0; i < max_count; i++) { 1403 const char *ipend; 1404 struct sockaddr_storage *ss = &addr[i].in_addr; 1405 int port; 1406 char delim = ','; 1407 1408 if (*p == '[') { 1409 delim = ']'; 1410 p++; 1411 } 1412 1413 ret = ceph_parse_server_name(p, end - p, ss, delim, &ipend); 1414 if (ret) 1415 goto bad; 1416 ret = -EINVAL; 1417 1418 p = ipend; 1419 1420 if (delim == ']') { 1421 if (*p != ']') { 1422 dout("missing matching ']'\n"); 1423 goto bad; 1424 } 1425 p++; 1426 } 1427 1428 /* port? */ 1429 if (p < end && *p == ':') { 1430 port = 0; 1431 p++; 1432 while (p < end && *p >= '0' && *p <= '9') { 1433 port = (port * 10) + (*p - '0'); 1434 p++; 1435 } 1436 if (port > 65535 || port == 0) 1437 goto bad; 1438 } else { 1439 port = CEPH_MON_PORT; 1440 } 1441 1442 addr_set_port(ss, port); 1443 1444 dout("parse_ips got %s\n", ceph_pr_addr(ss)); 1445 1446 if (p == end) 1447 break; 1448 if (*p != ',') 1449 goto bad; 1450 p++; 1451 } 1452 1453 if (p != end) 1454 goto bad; 1455 1456 if (count) 1457 *count = i + 1; 1458 return 0; 1459 1460 bad: 1461 pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c); 1462 return ret; 1463 } 1464 EXPORT_SYMBOL(ceph_parse_ips); 1465 1466 static int process_banner(struct ceph_connection *con) 1467 { 1468 dout("process_banner on %p\n", con); 1469 1470 if (verify_hello(con) < 0) 1471 return -1; 1472 1473 ceph_decode_addr(&con->actual_peer_addr); 1474 ceph_decode_addr(&con->peer_addr_for_me); 1475 1476 /* 1477 * Make sure the other end is who we wanted. note that the other 1478 * end may not yet know their ip address, so if it's 0.0.0.0, give 1479 * them the benefit of the doubt. 1480 */ 1481 if (memcmp(&con->peer_addr, &con->actual_peer_addr, 1482 sizeof(con->peer_addr)) != 0 && 1483 !(addr_is_blank(&con->actual_peer_addr.in_addr) && 1484 con->actual_peer_addr.nonce == con->peer_addr.nonce)) { 1485 pr_warning("wrong peer, want %s/%d, got %s/%d\n", 1486 ceph_pr_addr(&con->peer_addr.in_addr), 1487 (int)le32_to_cpu(con->peer_addr.nonce), 1488 ceph_pr_addr(&con->actual_peer_addr.in_addr), 1489 (int)le32_to_cpu(con->actual_peer_addr.nonce)); 1490 con->error_msg = "wrong peer at address"; 1491 return -1; 1492 } 1493 1494 /* 1495 * did we learn our address? 
static int process_banner(struct ceph_connection *con)
{
	dout("process_banner on %p\n", con);

	if (verify_hello(con) < 0)
		return -1;

	ceph_decode_addr(&con->actual_peer_addr);
	ceph_decode_addr(&con->peer_addr_for_me);

	/*
	 * Make sure the other end is who we wanted.  note that the other
	 * end may not yet know their ip address, so if it's 0.0.0.0, give
	 * them the benefit of the doubt.
	 */
	if (memcmp(&con->peer_addr, &con->actual_peer_addr,
		   sizeof(con->peer_addr)) != 0 &&
	    !(addr_is_blank(&con->actual_peer_addr.in_addr) &&
	      con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
		pr_warning("wrong peer, want %s/%d, got %s/%d\n",
			   ceph_pr_addr(&con->peer_addr.in_addr),
			   (int)le32_to_cpu(con->peer_addr.nonce),
			   ceph_pr_addr(&con->actual_peer_addr.in_addr),
			   (int)le32_to_cpu(con->actual_peer_addr.nonce));
		con->error_msg = "wrong peer at address";
		return -1;
	}

	/*
	 * did we learn our address?
	 */
	if (addr_is_blank(&con->msgr->inst.addr.in_addr)) {
		int port = addr_port(&con->msgr->inst.addr.in_addr);

		memcpy(&con->msgr->inst.addr.in_addr,
		       &con->peer_addr_for_me.in_addr,
		       sizeof(con->peer_addr_for_me.in_addr));
		addr_set_port(&con->msgr->inst.addr.in_addr, port);
		encode_my_addr(con->msgr);
		dout("process_banner learned my addr is %s\n",
		     ceph_pr_addr(&con->msgr->inst.addr.in_addr));
	}

	return 0;
}

static void fail_protocol(struct ceph_connection *con)
{
	reset_connection(con);
	BUG_ON(con->state != CON_STATE_NEGOTIATING);
	con->state = CON_STATE_CLOSED;
}

static int process_connect(struct ceph_connection *con)
{
	u64 sup_feat = con->msgr->supported_features;
	u64 req_feat = con->msgr->required_features;
	u64 server_feat = le64_to_cpu(con->in_reply.features);
	int ret;

	dout("process_connect on %p tag %d\n", con, (int)con->in_tag);

	switch (con->in_reply.tag) {
	case CEPH_MSGR_TAG_FEATURES:
		pr_err("%s%lld %s feature set mismatch,"
		       " my %llx < server's %llx, missing %llx\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr.in_addr),
		       sup_feat, server_feat, server_feat & ~sup_feat);
		con->error_msg = "missing required protocol features";
		fail_protocol(con);
		return -1;

	case CEPH_MSGR_TAG_BADPROTOVER:
		pr_err("%s%lld %s protocol version mismatch,"
		       " my %d != server's %d\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr.in_addr),
		       le32_to_cpu(con->out_connect.protocol_version),
		       le32_to_cpu(con->in_reply.protocol_version));
		con->error_msg = "protocol version mismatch";
		fail_protocol(con);
		return -1;

	case CEPH_MSGR_TAG_BADAUTHORIZER:
		con->auth_retry++;
		dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
		     con->auth_retry);
		if (con->auth_retry == 2) {
			con->error_msg = "connect authorization failure";
			return -1;
		}
		con->auth_retry = 1;
		con_out_kvec_reset(con);
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_RESETSESSION:
		/*
		 * If we connected with a large connect_seq but the peer
		 * has no record of a session with us (no connection, or
		 * connect_seq == 0), they will send RESETSESSION to indicate
		 * that they must have reset their session, and may have
		 * dropped messages.
		 */
		dout("process_connect got RESET peer seq %u\n",
		     le32_to_cpu(con->in_reply.connect_seq));
		pr_err("%s%lld %s connection reset\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr.in_addr));
		reset_connection(con);
		con_out_kvec_reset(con);
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);

		/* Tell ceph about it. */
		mutex_unlock(&con->mutex);
		pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name));
		if (con->ops->peer_reset)
			con->ops->peer_reset(con);
		mutex_lock(&con->mutex);
		if (con->state != CON_STATE_NEGOTIATING)
			return -EAGAIN;
		break;
	case CEPH_MSGR_TAG_RETRY_SESSION:
		/*
		 * If we sent a smaller connect_seq than the peer has, try
		 * again with a larger value.
		 */
		dout("process_connect got RETRY_SESSION my seq %u, peer %u\n",
		     le32_to_cpu(con->out_connect.connect_seq),
		     le32_to_cpu(con->in_reply.connect_seq));
		con->connect_seq = le32_to_cpu(con->in_reply.connect_seq);
		con_out_kvec_reset(con);
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_RETRY_GLOBAL:
		/*
		 * If we sent a smaller global_seq than the peer has, try
		 * again with a larger value.
		 */
		dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
		     con->peer_global_seq,
		     le32_to_cpu(con->in_reply.global_seq));
		get_global_seq(con->msgr,
			       le32_to_cpu(con->in_reply.global_seq));
		con_out_kvec_reset(con);
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_READY:
		if (req_feat & ~server_feat) {
			pr_err("%s%lld %s protocol feature mismatch,"
			       " my required %llx > server's %llx, need %llx\n",
			       ENTITY_NAME(con->peer_name),
			       ceph_pr_addr(&con->peer_addr.in_addr),
			       req_feat, server_feat, req_feat & ~server_feat);
			con->error_msg = "missing required protocol features";
			fail_protocol(con);
			return -1;
		}

		BUG_ON(con->state != CON_STATE_NEGOTIATING);
		con->state = CON_STATE_OPEN;

		con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
		con->connect_seq++;
		con->peer_features = server_feat;
		dout("process_connect got READY gseq %d cseq %d (%d)\n",
		     con->peer_global_seq,
		     le32_to_cpu(con->in_reply.connect_seq),
		     con->connect_seq);
		WARN_ON(con->connect_seq !=
			le32_to_cpu(con->in_reply.connect_seq));

		if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
			set_bit(CON_FLAG_LOSSYTX, &con->flags);

		con->delay = 0;      /* reset backoff memory */

		prepare_read_tag(con);
		break;

	case CEPH_MSGR_TAG_WAIT:
		/*
		 * If there is a connection race (we are opening
		 * connections to each other), one of us may just have
		 * to WAIT.  This shouldn't happen if we are the
		 * client.
		 */
		pr_err("process_connect got WAIT as client\n");
		con->error_msg = "protocol error, got WAIT as client";
		return -1;

	default:
		pr_err("connect protocol error, will retry\n");
		con->error_msg = "protocol error, garbage tag during connect";
		return -1;
	}
	return 0;
}


/*
 * read (part of) an ack
 */
static int read_partial_ack(struct ceph_connection *con)
{
	int size = sizeof (con->in_temp_ack);
	int end = size;

	return read_partial(con, end, size, &con->in_temp_ack);
}
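/*
 * Illustrative only: suppose we have sent seqs 1-5 and the peer acks
 * 3.  read_partial_ack() fills con->in_temp_ack with 3, and
 * process_ack() below drops messages 1-3 from out_sent, while 4 and 5
 * stay on the list so they can be resent after a fault.
 */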
/*
 * We can finally discard anything that's been acked.
 */
static void process_ack(struct ceph_connection *con)
{
	struct ceph_msg *m;
	u64 ack = le64_to_cpu(con->in_temp_ack);
	u64 seq;

	while (!list_empty(&con->out_sent)) {
		m = list_first_entry(&con->out_sent, struct ceph_msg,
				     list_head);
		seq = le64_to_cpu(m->hdr.seq);
		if (seq > ack)
			break;
		dout("got ack for seq %llu type %d at %p\n", seq,
		     le16_to_cpu(m->hdr.type), m);
		m->ack_stamp = jiffies;
		ceph_msg_remove(m);
	}
	prepare_read_tag(con);
}


static int read_partial_message_section(struct ceph_connection *con,
					struct kvec *section,
					unsigned int sec_len, u32 *crc)
{
	int ret, left;

	BUG_ON(!section);

	while (section->iov_len < sec_len) {
		BUG_ON(section->iov_base == NULL);
		left = sec_len - section->iov_len;
		ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base +
				       section->iov_len, left);
		if (ret <= 0)
			return ret;
		section->iov_len += ret;
	}
	if (section->iov_len == sec_len)
		*crc = crc32c(0, section->iov_base, section->iov_len);

	return 1;
}

static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip);

static int read_partial_message_pages(struct ceph_connection *con,
				      struct page **pages,
				      unsigned int data_len, bool do_datacrc)
{
	void *p;
	int ret;
	int left;

	left = min((int)(data_len - con->in_msg_pos.data_pos),
		   (int)(PAGE_SIZE - con->in_msg_pos.page_pos));
	/* (page) data */
	BUG_ON(pages == NULL);
	p = kmap(pages[con->in_msg_pos.page]);
	ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
			       left);
	if (ret > 0 && do_datacrc)
		con->in_data_crc =
			crc32c(con->in_data_crc,
				  p + con->in_msg_pos.page_pos, ret);
	kunmap(pages[con->in_msg_pos.page]);
	if (ret <= 0)
		return ret;
	con->in_msg_pos.data_pos += ret;
	con->in_msg_pos.page_pos += ret;
	if (con->in_msg_pos.page_pos == PAGE_SIZE) {
		con->in_msg_pos.page_pos = 0;
		con->in_msg_pos.page++;
	}

	return ret;
}

#ifdef CONFIG_BLOCK
static int read_partial_message_bio(struct ceph_connection *con,
				    struct bio **bio_iter, int *bio_seg,
				    unsigned int data_len, bool do_datacrc)
{
	struct bio_vec *bv = bio_iovec_idx(*bio_iter, *bio_seg);
	void *p;
	int ret, left;

	left = min((int)(data_len - con->in_msg_pos.data_pos),
		   (int)(bv->bv_len - con->in_msg_pos.page_pos));

	p = kmap(bv->bv_page) + bv->bv_offset;

	ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
			       left);
	if (ret > 0 && do_datacrc)
		con->in_data_crc =
			crc32c(con->in_data_crc,
				  p + con->in_msg_pos.page_pos, ret);
	kunmap(bv->bv_page);
	if (ret <= 0)
		return ret;
	con->in_msg_pos.data_pos += ret;
	con->in_msg_pos.page_pos += ret;
	if (con->in_msg_pos.page_pos == bv->bv_len) {
		con->in_msg_pos.page_pos = 0;
		iter_bio_next(bio_iter, bio_seg);
	}

	return ret;
}
#endif
/*
 * read (part of) a message.
 */
static int read_partial_message(struct ceph_connection *con)
{
	struct ceph_msg *m = con->in_msg;
	int size;
	int end;
	int ret;
	unsigned int front_len, middle_len, data_len;
	bool do_datacrc = !con->msgr->nocrc;
	u64 seq;
	u32 crc;

	dout("read_partial_message con %p msg %p\n", con, m);

	/* header */
	size = sizeof (con->in_hdr);
	end = size;
	ret = read_partial(con, end, size, &con->in_hdr);
	if (ret <= 0)
		return ret;

	crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc));
	if (cpu_to_le32(crc) != con->in_hdr.crc) {
		pr_err("read_partial_message bad hdr crc %u != expected %u\n",
		       crc, con->in_hdr.crc);
		return -EBADMSG;
	}

	front_len = le32_to_cpu(con->in_hdr.front_len);
	if (front_len > CEPH_MSG_MAX_FRONT_LEN)
		return -EIO;
	middle_len = le32_to_cpu(con->in_hdr.middle_len);
	if (middle_len > CEPH_MSG_MAX_DATA_LEN)
		return -EIO;
	data_len = le32_to_cpu(con->in_hdr.data_len);
	if (data_len > CEPH_MSG_MAX_DATA_LEN)
		return -EIO;

	/* verify seq# */
	seq = le64_to_cpu(con->in_hdr.seq);
	if ((s64)seq - (s64)con->in_seq < 1) {
		pr_info("skipping %s%lld %s seq %lld expected %lld\n",
			ENTITY_NAME(con->peer_name),
			ceph_pr_addr(&con->peer_addr.in_addr),
			seq, con->in_seq + 1);
		con->in_base_pos = -front_len - middle_len - data_len -
			sizeof(m->footer);
		con->in_tag = CEPH_MSGR_TAG_READY;
		return 0;
	} else if ((s64)seq - (s64)con->in_seq > 1) {
		pr_err("read_partial_message bad seq %lld expected %lld\n",
		       seq, con->in_seq + 1);
		con->error_msg = "bad message sequence # for incoming message";
		return -EBADMSG;
	}
	/* allocate message? */
	if (!con->in_msg) {
		int skip = 0;

		dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
		     con->in_hdr.front_len, con->in_hdr.data_len);
		ret = ceph_con_in_msg_alloc(con, &skip);
		if (ret < 0)
			return ret;
		if (skip) {
			/* skip this message */
			dout("alloc_msg said skip message\n");
			BUG_ON(con->in_msg);
			con->in_base_pos = -front_len - middle_len - data_len -
				sizeof(m->footer);
			con->in_tag = CEPH_MSGR_TAG_READY;
			con->in_seq++;
			return 0;
		}

		BUG_ON(!con->in_msg);
		BUG_ON(con->in_msg->con != con);
		m = con->in_msg;
		m->front.iov_len = 0;    /* haven't read it yet */
		if (m->middle)
			m->middle->vec.iov_len = 0;

		con->in_msg_pos.page = 0;
		if (m->pages)
			con->in_msg_pos.page_pos = m->page_alignment;
		else
			con->in_msg_pos.page_pos = 0;
		con->in_msg_pos.data_pos = 0;

#ifdef CONFIG_BLOCK
		if (m->bio)
			init_bio_iter(m->bio, &m->bio_iter, &m->bio_seg);
#endif
	}

	/* front */
	ret = read_partial_message_section(con, &m->front, front_len,
					   &con->in_front_crc);
	if (ret <= 0)
		return ret;

	/* middle */
	if (m->middle) {
		ret = read_partial_message_section(con, &m->middle->vec,
						   middle_len,
						   &con->in_middle_crc);
		if (ret <= 0)
			return ret;
	}

	/* (page) data */
	while (con->in_msg_pos.data_pos < data_len) {
		if (m->pages) {
			ret = read_partial_message_pages(con, m->pages,
						 data_len, do_datacrc);
			if (ret <= 0)
				return ret;
#ifdef CONFIG_BLOCK
		} else if (m->bio) {
			BUG_ON(!m->bio_iter);
			ret = read_partial_message_bio(con,
						 &m->bio_iter, &m->bio_seg,
						 data_len, do_datacrc);
			if (ret <= 0)
				return ret;
#endif
		} else {
			BUG_ON(1);
		}
	}

	/* footer */
	size = sizeof (m->footer);
	end += size;
	ret = read_partial(con, end, size, &m->footer);
	if (ret <= 0)
		return ret;

	dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
	     m, front_len, m->footer.front_crc, middle_len,
	     m->footer.middle_crc, data_len, m->footer.data_crc);

	/* crc ok? */
	if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) {
		pr_err("read_partial_message %p front crc %u != exp. %u\n",
		       m, con->in_front_crc, m->footer.front_crc);
		return -EBADMSG;
	}
	if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) {
		pr_err("read_partial_message %p middle crc %u != exp %u\n",
		       m, con->in_middle_crc, m->footer.middle_crc);
		return -EBADMSG;
	}
	if (do_datacrc &&
	    (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 &&
	    con->in_data_crc != le32_to_cpu(m->footer.data_crc)) {
		pr_err("read_partial_message %p data crc %u != exp. %u\n", m,
		       con->in_data_crc, le32_to_cpu(m->footer.data_crc));
		return -EBADMSG;
	}

	return 1; /* done! */
}
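/*
 * Illustrative only: when a message must be skipped (stale seq, or
 * the alloc_msg callback asked us to), in_base_pos is set to minus
 * the total remaining size, e.g. for front=128, middle=0, data=4096:
 *
 *	con->in_base_pos = -(128 + 0 + 4096 + sizeof(m->footer));
 *
 * try_read() treats a negative position as "bytes left to discard"
 * and drains them through the static skip buffer until in_base_pos
 * climbs back to zero.
 */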
/*
 * Process message.  This happens in the worker thread.  The callback should
 * be careful not to do anything that waits on other incoming messages or it
 * may deadlock.
 */
static void process_message(struct ceph_connection *con)
{
	struct ceph_msg *msg;

	BUG_ON(con->in_msg->con != con);
	con->in_msg->con = NULL;
	msg = con->in_msg;
	con->in_msg = NULL;
	con->ops->put(con);

	/* if first message, set peer_name */
	if (con->peer_name.type == 0)
		con->peer_name = msg->hdr.src;

	con->in_seq++;
	mutex_unlock(&con->mutex);

	dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n",
	     msg, le64_to_cpu(msg->hdr.seq),
	     ENTITY_NAME(msg->hdr.src),
	     le16_to_cpu(msg->hdr.type),
	     ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
	     le32_to_cpu(msg->hdr.front_len),
	     le32_to_cpu(msg->hdr.data_len),
	     con->in_front_crc, con->in_middle_crc, con->in_data_crc);
	con->ops->dispatch(con, msg);

	mutex_lock(&con->mutex);
}


/*
 * Write something to the socket.  Called in a worker thread when the
 * socket appears to be writeable and we have something ready to send.
 */
static int try_write(struct ceph_connection *con)
{
	int ret = 1;

	dout("try_write start %p state %lu\n", con, con->state);

more:
	dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);

	/* open the socket first? */
	if (con->state == CON_STATE_PREOPEN) {
		BUG_ON(con->sock);
		con->state = CON_STATE_CONNECTING;

		con_out_kvec_reset(con);
		prepare_write_banner(con);
		prepare_read_banner(con);

		BUG_ON(con->in_msg);
		con->in_tag = CEPH_MSGR_TAG_READY;
		dout("try_write initiating connect on %p new state %lu\n",
		     con, con->state);
		ret = ceph_tcp_connect(con);
		if (ret < 0) {
			con->error_msg = "connect error";
			goto out;
		}
	}

more_kvec:
	/* kvec data queued? */
	if (con->out_skip) {
		ret = write_partial_skip(con);
		if (ret <= 0)
			goto out;
	}
	if (con->out_kvec_left) {
		ret = write_partial_kvec(con);
		if (ret <= 0)
			goto out;
	}

	/* msg pages? */
	if (con->out_msg) {
		if (con->out_msg_done) {
			ceph_msg_put(con->out_msg);
			con->out_msg = NULL;   /* we're done with this one */
			goto do_next;
		}

		ret = write_partial_msg_pages(con);
		if (ret == 1)
			goto more_kvec;  /* we need to send the footer, too! */
		if (ret == 0)
			goto out;
		if (ret < 0) {
			dout("try_write write_partial_msg_pages err %d\n",
			     ret);
			goto out;
		}
	}

do_next:
	if (con->state == CON_STATE_OPEN) {
		/* is anything else pending? */
		if (!list_empty(&con->out_queue)) {
			prepare_write_message(con);
			goto more;
		}
		if (con->in_seq > con->in_seq_acked) {
			prepare_write_ack(con);
			goto more;
		}
		if (test_and_clear_bit(CON_FLAG_KEEPALIVE_PENDING,
				       &con->flags)) {
			prepare_write_keepalive(con);
			goto more;
		}
	}

	/* Nothing to do! */
	clear_bit(CON_FLAG_WRITE_PENDING, &con->flags);
	dout("try_write nothing else to write.\n");
	ret = 0;
out:
	dout("try_write done on %p ret %d\n", con, ret);
	return ret;
}
/*
 * Read what we can from the socket.
 */
static int try_read(struct ceph_connection *con)
{
	int ret = -1;

more:
	dout("try_read start on %p state %lu\n", con, con->state);
	if (con->state != CON_STATE_CONNECTING &&
	    con->state != CON_STATE_NEGOTIATING &&
	    con->state != CON_STATE_OPEN)
		return 0;

	BUG_ON(!con->sock);

	dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
	     con->in_base_pos);

	if (con->state == CON_STATE_CONNECTING) {
		dout("try_read connecting\n");
		ret = read_partial_banner(con);
		if (ret <= 0)
			goto out;
		ret = process_banner(con);
		if (ret < 0)
			goto out;

		BUG_ON(con->state != CON_STATE_CONNECTING);
		con->state = CON_STATE_NEGOTIATING;

		/*
		 * Received banner is good, exchange connection info.
		 * Do not reset out_kvec, as sending our banner raced
		 * with receiving peer banner after connect completed.
		 */
		ret = prepare_write_connect(con);
		if (ret < 0)
			goto out;
		prepare_read_connect(con);

		/* Send connection info before awaiting response */
		goto out;
	}

	if (con->state == CON_STATE_NEGOTIATING) {
		dout("try_read negotiating\n");
		ret = read_partial_connect(con);
		if (ret <= 0)
			goto out;
		ret = process_connect(con);
		if (ret < 0)
			goto out;
		goto more;
	}

	BUG_ON(con->state != CON_STATE_OPEN);

	if (con->in_base_pos < 0) {
		/*
		 * skipping + discarding content.
		 *
		 * FIXME: there must be a better way to do this!
		 */
		static char buf[SKIP_BUF_SIZE];
		int skip = min((int) sizeof(buf), -con->in_base_pos);

		dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
		ret = ceph_tcp_recvmsg(con->sock, buf, skip);
		if (ret <= 0)
			goto out;
		con->in_base_pos += ret;
		if (con->in_base_pos)
			goto more;
	}
	if (con->in_tag == CEPH_MSGR_TAG_READY) {
		/*
		 * what's next?
		 */
		ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
		if (ret <= 0)
			goto out;
		dout("try_read got tag %d\n", (int)con->in_tag);
		switch (con->in_tag) {
		case CEPH_MSGR_TAG_MSG:
			prepare_read_message(con);
			break;
		case CEPH_MSGR_TAG_ACK:
			prepare_read_ack(con);
			break;
		case CEPH_MSGR_TAG_CLOSE:
			con_close_socket(con);
			con->state = CON_STATE_CLOSED;
			goto out;
		default:
			goto bad_tag;
		}
	}
	if (con->in_tag == CEPH_MSGR_TAG_MSG) {
		ret = read_partial_message(con);
		if (ret <= 0) {
			switch (ret) {
			case -EBADMSG:
				con->error_msg = "bad crc";
				ret = -EIO;
				break;
			case -EIO:
				con->error_msg = "io error";
				break;
			}
			goto out;
		}
		if (con->in_tag == CEPH_MSGR_TAG_READY)
			goto more;
		process_message(con);
		if (con->state == CON_STATE_OPEN)
			prepare_read_tag(con);
		goto more;
	}
	if (con->in_tag == CEPH_MSGR_TAG_ACK) {
		ret = read_partial_ack(con);
		if (ret <= 0)
			goto out;
		process_ack(con);
		goto more;
	}

out:
	dout("try_read done on %p ret %d\n", con, ret);
	return ret;

bad_tag:
	pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag);
	con->error_msg = "protocol error, garbage tag";
	ret = -1;
	goto out;
}
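/*
 * A worked example of the skip path above (illustrative only): when a
 * message is revoked or alloc_msg asks us to skip it, in_base_pos is
 * set to minus the number of bytes still owed to the wire, e.g.
 *
 *	con->in_base_pos = -front_len - middle_len - data_len -
 *			   sizeof(struct ceph_msg_footer);
 *
 * and the in_base_pos < 0 branch in try_read() then reads and discards
 * that many bytes in SKIP_BUF_SIZE (1024-byte) chunks before looking
 * for the next tag.
 */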
/*
 * Atomically queue work on a connection.  Bump @con reference to
 * avoid races with connection teardown.
 */
static void queue_con(struct ceph_connection *con)
{
	if (!con->ops->get(con)) {
		dout("queue_con %p ref count 0\n", con);
		return;
	}

	if (!queue_delayed_work(ceph_msgr_wq, &con->work, 0)) {
		dout("queue_con %p - already queued\n", con);
		con->ops->put(con);
	} else {
		dout("queue_con %p\n", con);
	}
}

/*
 * Do some work on a connection.  Drop a connection ref when we're done.
 */
static void con_work(struct work_struct *work)
{
	struct ceph_connection *con = container_of(work, struct ceph_connection,
						   work.work);
	int ret;

	mutex_lock(&con->mutex);
restart:
	if (test_and_clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags)) {
		switch (con->state) {
		case CON_STATE_CONNECTING:
			con->error_msg = "connection failed";
			break;
		case CON_STATE_NEGOTIATING:
			con->error_msg = "negotiation failed";
			break;
		case CON_STATE_OPEN:
			con->error_msg = "socket closed";
			break;
		default:
			dout("unrecognized con state %d\n", (int)con->state);
			con->error_msg = "unrecognized con state";
			BUG();
		}
		goto fault;
	}

	if (test_and_clear_bit(CON_FLAG_BACKOFF, &con->flags)) {
		dout("con_work %p backing off\n", con);
		if (queue_delayed_work(ceph_msgr_wq, &con->work,
				       round_jiffies_relative(con->delay))) {
			dout("con_work %p backoff %lu\n", con, con->delay);
			mutex_unlock(&con->mutex);
			return;
		} else {
			con->ops->put(con);
			dout("con_work %p FAILED to back off %lu\n", con,
			     con->delay);
		}
	}

	if (con->state == CON_STATE_STANDBY) {
		dout("con_work %p STANDBY\n", con);
		goto done;
	}
	if (con->state == CON_STATE_CLOSED) {
		dout("con_work %p CLOSED\n", con);
		BUG_ON(con->sock);
		goto done;
	}
	if (con->state == CON_STATE_PREOPEN) {
		dout("con_work %p PREOPEN\n", con);
		BUG_ON(con->sock);
	}

	ret = try_read(con);
	if (ret == -EAGAIN)
		goto restart;
	if (ret < 0) {
		con->error_msg = "socket error on read";
		goto fault;
	}

	ret = try_write(con);
	if (ret == -EAGAIN)
		goto restart;
	if (ret < 0) {
		con->error_msg = "socket error on write";
		goto fault;
	}

done:
	mutex_unlock(&con->mutex);
done_unlocked:
	con->ops->put(con);
	return;

fault:
	ceph_fault(con);     /* error/fault path */
	goto done_unlocked;
}
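/*
 * A minimal sketch of the ops->get()/ops->put() contract that
 * queue_con() and con_work() rely on, assuming a hypothetical owner
 * struct that embeds the connection (my_session, my_con_get and
 * my_con_put are made up for illustration):
 *
 *	struct my_session {
 *		struct ceph_connection con;
 *		atomic_t ref;
 *	};
 *
 *	static struct ceph_connection *my_con_get(struct ceph_connection *con)
 *	{
 *		struct my_session *s =
 *			container_of(con, struct my_session, con);
 *
 *		return atomic_inc_not_zero(&s->ref) ? con : NULL;
 *	}
 *
 *	static void my_con_put(struct ceph_connection *con)
 *	{
 *		struct my_session *s =
 *			container_of(con, struct my_session, con);
 *
 *		if (atomic_dec_and_test(&s->ref))
 *			kfree(s);
 *	}
 *
 * get() returning NULL is how a dying owner refuses new work; that is
 * exactly the case queue_con() checks for.
 */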
/*
 * Generic error/fault handler.  Faults are retried with an
 * exponentially increasing backoff delay.
 */
static void ceph_fault(struct ceph_connection *con)
	__releases(con->mutex)
{
	pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
	       ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
	dout("fault %p state %lu to peer %s\n",
	     con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));

	BUG_ON(con->state != CON_STATE_CONNECTING &&
	       con->state != CON_STATE_NEGOTIATING &&
	       con->state != CON_STATE_OPEN);

	con_close_socket(con);

	if (test_bit(CON_FLAG_LOSSYTX, &con->flags)) {
		dout("fault on LOSSYTX channel, marking CLOSED\n");
		con->state = CON_STATE_CLOSED;
		goto out_unlock;
	}

	if (con->in_msg) {
		BUG_ON(con->in_msg->con != con);
		con->in_msg->con = NULL;
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
		con->ops->put(con);
	}

	/* Requeue anything that hasn't been acked */
	list_splice_init(&con->out_sent, &con->out_queue);

	/* If there are no messages queued or keepalive pending, place
	 * the connection in a STANDBY state */
	if (list_empty(&con->out_queue) &&
	    !test_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags)) {
		dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
		clear_bit(CON_FLAG_WRITE_PENDING, &con->flags);
		con->state = CON_STATE_STANDBY;
	} else {
		/* retry after a delay. */
		con->state = CON_STATE_PREOPEN;
		if (con->delay == 0)
			con->delay = BASE_DELAY_INTERVAL;
		else if (con->delay < MAX_DELAY_INTERVAL)
			con->delay *= 2;
		con->ops->get(con);
		if (queue_delayed_work(ceph_msgr_wq, &con->work,
				       round_jiffies_relative(con->delay))) {
			dout("fault queued %p delay %lu\n", con, con->delay);
		} else {
			con->ops->put(con);
			dout("fault failed to queue %p delay %lu, backoff\n",
			     con, con->delay);
			/*
			 * In many cases we see a socket state change
			 * while con_work is running and end up
			 * queuing (non-delayed) work, such that we
			 * can't backoff with a delay.  Set a flag so
			 * that when con_work restarts we schedule the
			 * delay then.
			 */
			set_bit(CON_FLAG_BACKOFF, &con->flags);
		}
	}

out_unlock:
	mutex_unlock(&con->mutex);
	/*
	 * in case we faulted due to authentication, invalidate our
	 * current tickets so that we can get new ones.
	 */
2428 */ 2429 if (con->auth_retry && con->ops->invalidate_authorizer) { 2430 dout("calling invalidate_authorizer()\n"); 2431 con->ops->invalidate_authorizer(con); 2432 } 2433 2434 if (con->ops->fault) 2435 con->ops->fault(con); 2436 } 2437 2438 2439 2440 /* 2441 * initialize a new messenger instance 2442 */ 2443 void ceph_messenger_init(struct ceph_messenger *msgr, 2444 struct ceph_entity_addr *myaddr, 2445 u32 supported_features, 2446 u32 required_features, 2447 bool nocrc) 2448 { 2449 msgr->supported_features = supported_features; 2450 msgr->required_features = required_features; 2451 2452 spin_lock_init(&msgr->global_seq_lock); 2453 2454 if (myaddr) 2455 msgr->inst.addr = *myaddr; 2456 2457 /* select a random nonce */ 2458 msgr->inst.addr.type = 0; 2459 get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce)); 2460 encode_my_addr(msgr); 2461 msgr->nocrc = nocrc; 2462 2463 atomic_set(&msgr->stopping, 0); 2464 2465 dout("%s %p\n", __func__, msgr); 2466 } 2467 EXPORT_SYMBOL(ceph_messenger_init); 2468 2469 static void clear_standby(struct ceph_connection *con) 2470 { 2471 /* come back from STANDBY? */ 2472 if (con->state == CON_STATE_STANDBY) { 2473 dout("clear_standby %p and ++connect_seq\n", con); 2474 con->state = CON_STATE_PREOPEN; 2475 con->connect_seq++; 2476 WARN_ON(test_bit(CON_FLAG_WRITE_PENDING, &con->flags)); 2477 WARN_ON(test_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags)); 2478 } 2479 } 2480 2481 /* 2482 * Queue up an outgoing message on the given connection. 2483 */ 2484 void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg) 2485 { 2486 /* set src+dst */ 2487 msg->hdr.src = con->msgr->inst.name; 2488 BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len)); 2489 msg->needs_out_seq = true; 2490 2491 mutex_lock(&con->mutex); 2492 2493 if (con->state == CON_STATE_CLOSED) { 2494 dout("con_send %p closed, dropping %p\n", con, msg); 2495 ceph_msg_put(msg); 2496 mutex_unlock(&con->mutex); 2497 return; 2498 } 2499 2500 BUG_ON(msg->con != NULL); 2501 msg->con = con->ops->get(con); 2502 BUG_ON(msg->con == NULL); 2503 2504 BUG_ON(!list_empty(&msg->list_head)); 2505 list_add_tail(&msg->list_head, &con->out_queue); 2506 dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg, 2507 ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type), 2508 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)), 2509 le32_to_cpu(msg->hdr.front_len), 2510 le32_to_cpu(msg->hdr.middle_len), 2511 le32_to_cpu(msg->hdr.data_len)); 2512 2513 clear_standby(con); 2514 mutex_unlock(&con->mutex); 2515 2516 /* if there wasn't anything waiting to send before, queue 2517 * new work */ 2518 if (test_and_set_bit(CON_FLAG_WRITE_PENDING, &con->flags) == 0) 2519 queue_con(con); 2520 } 2521 EXPORT_SYMBOL(ceph_con_send); 2522 2523 /* 2524 * Revoke a message that was previously queued for send 2525 */ 2526 void ceph_msg_revoke(struct ceph_msg *msg) 2527 { 2528 struct ceph_connection *con = msg->con; 2529 2530 if (!con) 2531 return; /* Message not in our possession */ 2532 2533 mutex_lock(&con->mutex); 2534 if (!list_empty(&msg->list_head)) { 2535 dout("%s %p msg %p - was on queue\n", __func__, con, msg); 2536 list_del_init(&msg->list_head); 2537 BUG_ON(msg->con == NULL); 2538 msg->con->ops->put(msg->con); 2539 msg->con = NULL; 2540 msg->hdr.seq = 0; 2541 2542 ceph_msg_put(msg); 2543 } 2544 if (con->out_msg == msg) { 2545 dout("%s %p msg %p - was sending\n", __func__, con, msg); 2546 con->out_msg = NULL; 2547 if (con->out_kvec_is_msg) { 2548 con->out_skip = con->out_kvec_bytes; 2549 
/*
 * Revoke a message that was previously queued for send
 */
void ceph_msg_revoke(struct ceph_msg *msg)
{
	struct ceph_connection *con = msg->con;

	if (!con)
		return;		/* Message not in our possession */

	mutex_lock(&con->mutex);
	if (!list_empty(&msg->list_head)) {
		dout("%s %p msg %p - was on queue\n", __func__, con, msg);
		list_del_init(&msg->list_head);
		BUG_ON(msg->con == NULL);
		msg->con->ops->put(msg->con);
		msg->con = NULL;
		msg->hdr.seq = 0;

		ceph_msg_put(msg);
	}
	if (con->out_msg == msg) {
		dout("%s %p msg %p - was sending\n", __func__, con, msg);
		con->out_msg = NULL;
		if (con->out_kvec_is_msg) {
			con->out_skip = con->out_kvec_bytes;
			con->out_kvec_is_msg = false;
		}
		msg->hdr.seq = 0;

		ceph_msg_put(msg);
	}
	mutex_unlock(&con->mutex);
}

/*
 * Revoke a message that we may be reading data into
 */
void ceph_msg_revoke_incoming(struct ceph_msg *msg)
{
	struct ceph_connection *con;

	BUG_ON(msg == NULL);
	if (!msg->con) {
		dout("%s msg %p null con\n", __func__, msg);

		return;		/* Message not in our possession */
	}

	con = msg->con;
	mutex_lock(&con->mutex);
	if (con->in_msg == msg) {
		unsigned int front_len = le32_to_cpu(con->in_hdr.front_len);
		unsigned int middle_len = le32_to_cpu(con->in_hdr.middle_len);
		unsigned int data_len = le32_to_cpu(con->in_hdr.data_len);

		/* skip rest of message */
		dout("%s %p msg %p revoked\n", __func__, con, msg);
		con->in_base_pos = con->in_base_pos -
				sizeof(struct ceph_msg_header) -
				front_len -
				middle_len -
				data_len -
				sizeof(struct ceph_msg_footer);
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
		con->in_tag = CEPH_MSGR_TAG_READY;
		con->in_seq++;
	} else {
		dout("%s %p in_msg %p msg %p no-op\n",
		     __func__, con, con->in_msg, msg);
	}
	mutex_unlock(&con->mutex);
}

/*
 * Queue a keepalive byte to ensure the TCP connection is alive.
 */
void ceph_con_keepalive(struct ceph_connection *con)
{
	dout("con_keepalive %p\n", con);
	mutex_lock(&con->mutex);
	clear_standby(con);
	mutex_unlock(&con->mutex);
	if (test_and_set_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags) == 0 &&
	    test_and_set_bit(CON_FLAG_WRITE_PENDING, &con->flags) == 0)
		queue_con(con);
}
EXPORT_SYMBOL(ceph_con_keepalive);
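/*
 * An illustrative sketch of how a request-based caller typically uses
 * the two revoke calls when abandoning an in-flight exchange (r_request
 * and r_reply are hypothetical field names, shown only for shape):
 *
 *	ceph_msg_revoke(req->r_request);	 stop (re)sending it
 *	ceph_msg_revoke_incoming(req->r_reply);	 discard a partial reply
 *
 * Both are safe no-ops when the message is not currently owned by a
 * connection.
 */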
/*
 * Construct a new message with the given type and front buffer size.
 * The new message has a reference count of 1.
 */
struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
			      bool can_fail)
{
	struct ceph_msg *m;

	m = kmalloc(sizeof(*m), flags);
	if (m == NULL)
		goto out;
	kref_init(&m->kref);

	m->con = NULL;
	INIT_LIST_HEAD(&m->list_head);

	m->hdr.tid = 0;
	m->hdr.type = cpu_to_le16(type);
	m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
	m->hdr.version = 0;
	m->hdr.front_len = cpu_to_le32(front_len);
	m->hdr.middle_len = 0;
	m->hdr.data_len = 0;
	m->hdr.data_off = 0;
	m->hdr.reserved = 0;
	m->footer.front_crc = 0;
	m->footer.middle_crc = 0;
	m->footer.data_crc = 0;
	m->footer.flags = 0;
	m->front_max = front_len;
	m->front_is_vmalloc = false;
	m->more_to_follow = false;
	m->ack_stamp = 0;
	m->pool = NULL;

	/* middle */
	m->middle = NULL;

	/* data */
	m->nr_pages = 0;
	m->page_alignment = 0;
	m->pages = NULL;
	m->pagelist = NULL;
	m->bio = NULL;
	m->bio_iter = NULL;
	m->bio_seg = 0;
	m->trail = NULL;

	/* front */
	if (front_len) {
		if (front_len > PAGE_CACHE_SIZE) {
			m->front.iov_base = __vmalloc(front_len, flags,
						      PAGE_KERNEL);
			m->front_is_vmalloc = true;
		} else {
			m->front.iov_base = kmalloc(front_len, flags);
		}
		if (m->front.iov_base == NULL) {
			dout("ceph_msg_new can't allocate %d bytes\n",
			     front_len);
			goto out2;
		}
	} else {
		m->front.iov_base = NULL;
	}
	m->front.iov_len = front_len;

	dout("ceph_msg_new %p front %d\n", m, front_len);
	return m;

out2:
	ceph_msg_put(m);
out:
	if (!can_fail) {
		pr_err("msg_new can't create type %d front %d\n", type,
		       front_len);
		WARN_ON(1);
	} else {
		dout("msg_new can't create type %d front %d\n", type,
		     front_len);
	}
	return NULL;
}
EXPORT_SYMBOL(ceph_msg_new);

/*
 * Allocate "middle" portion of a message, if it is needed and wasn't
 * allocated by alloc_msg.  This allows us to read a small fixed-size
 * per-type header in the front and then gracefully fail (i.e.,
 * propagate the error to the caller based on info in the front) when
 * the middle is too large.
 */
static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
{
	int type = le16_to_cpu(msg->hdr.type);
	int middle_len = le32_to_cpu(msg->hdr.middle_len);

	dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
	     ceph_msg_type_name(type), middle_len);
	BUG_ON(!middle_len);
	BUG_ON(msg->middle);

	msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
	if (!msg->middle)
		return -ENOMEM;
	return 0;
}
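/*
 * A minimal sketch of an ops->alloc_msg callback feeding
 * ceph_con_in_msg_alloc() below (lookup_request() is a hypothetical
 * helper standing in for the caller's own tid tracking):
 *
 *	static struct ceph_msg *my_alloc_msg(struct ceph_connection *con,
 *					     struct ceph_msg_header *hdr,
 *					     int *skip)
 *	{
 *		int type = le16_to_cpu(hdr->type);
 *		int front_len = le32_to_cpu(hdr->front_len);
 *
 *		if (!lookup_request(le64_to_cpu(hdr->tid))) {
 *			*skip = 1;	unsolicited; messenger discards it
 *			return NULL;
 *		}
 *		*skip = 0;
 *		return ceph_msg_new(type, front_len, GFP_NOFS, false);
 *	}
 *
 * Returning NULL with *skip = 0 is treated as an allocation failure by
 * the messenger.  The callback runs with con->mutex dropped, so it may
 * block.
 */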
/*
 * Allocate a message for receiving an incoming message on a
 * connection, and save the result in con->in_msg.  Uses the
 * connection's private alloc_msg op if available.
 *
 * Returns 0 on success, or a negative error code.
 *
 * On success, if we set *skip = 1:
 *  - the next message should be skipped and ignored.
 *  - con->in_msg == NULL
 * or if we set *skip = 0:
 *  - con->in_msg is non-null.
 * On error (ENOMEM, EAGAIN, ...),
 *  - con->in_msg == NULL
 */
static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip)
{
	struct ceph_msg_header *hdr = &con->in_hdr;
	int type = le16_to_cpu(hdr->type);
	int front_len = le32_to_cpu(hdr->front_len);
	int middle_len = le32_to_cpu(hdr->middle_len);
	int ret = 0;

	BUG_ON(con->in_msg != NULL);

	if (con->ops->alloc_msg) {
		struct ceph_msg *msg;

		mutex_unlock(&con->mutex);
		msg = con->ops->alloc_msg(con, hdr, skip);
		mutex_lock(&con->mutex);
		if (con->state != CON_STATE_OPEN) {
			if (msg)
				ceph_msg_put(msg);
			return -EAGAIN;
		}
		if (*skip) {
			/* skip this message; drop any msg the op returned */
			if (msg)
				ceph_msg_put(msg);
			con->in_msg = NULL;
			return 0;
		}
		con->in_msg = msg;
		if (!con->in_msg) {
			con->error_msg =
				"error allocating memory for incoming message";
			return -ENOMEM;
		}
		con->in_msg->con = con->ops->get(con);
		BUG_ON(con->in_msg->con == NULL);
	}
	if (!con->in_msg) {
		con->in_msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
		if (!con->in_msg) {
			pr_err("unable to allocate msg type %d len %d\n",
			       type, front_len);
			return -ENOMEM;
		}
		con->in_msg->con = con->ops->get(con);
		BUG_ON(con->in_msg->con == NULL);
		con->in_msg->page_alignment = le16_to_cpu(hdr->data_off);
	}
	memcpy(&con->in_msg->hdr, &con->in_hdr, sizeof(con->in_hdr));

	if (middle_len && !con->in_msg->middle) {
		ret = ceph_alloc_middle(con, con->in_msg);
		if (ret < 0) {
			ceph_msg_put(con->in_msg);
			con->in_msg = NULL;
		}
	}

	return ret;
}


/*
 * Free a generically kmalloc'd message.
 */
void ceph_msg_kfree(struct ceph_msg *m)
{
	dout("msg_kfree %p\n", m);
	if (m->front_is_vmalloc)
		vfree(m->front.iov_base);
	else
		kfree(m->front.iov_base);
	kfree(m);
}

/*
 * Drop a msg ref.  Destroy as needed.
 */
void ceph_msg_last_put(struct kref *kref)
{
	struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);

	dout("ceph_msg_put last one on %p\n", m);
	WARN_ON(!list_empty(&m->list_head));

	/* drop middle, data, if any */
	if (m->middle) {
		ceph_buffer_put(m->middle);
		m->middle = NULL;
	}
	m->nr_pages = 0;
	m->pages = NULL;

	if (m->pagelist) {
		ceph_pagelist_release(m->pagelist);
		kfree(m->pagelist);
		m->pagelist = NULL;
	}

	m->trail = NULL;

	if (m->pool)
		ceph_msgpool_put(m->pool, m);
	else
		ceph_msg_kfree(m);
}
EXPORT_SYMBOL(ceph_msg_last_put);

void ceph_msg_dump(struct ceph_msg *msg)
{
	pr_debug("msg_dump %p (front_max %d nr_pages %d)\n", msg,
		 msg->front_max, msg->nr_pages);
	print_hex_dump(KERN_DEBUG, "header: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       &msg->hdr, sizeof(msg->hdr), true);
	print_hex_dump(KERN_DEBUG, " front: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       msg->front.iov_base, msg->front.iov_len, true);
	if (msg->middle)
		print_hex_dump(KERN_DEBUG, "middle: ",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       msg->middle->vec.iov_base,
			       msg->middle->vec.iov_len, true);
	print_hex_dump(KERN_DEBUG, "footer: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       &msg->footer, sizeof(msg->footer), true);
}
EXPORT_SYMBOL(ceph_msg_dump);
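/*
 * An illustrative refcounting sketch (assuming ceph_msg_get() and
 * ceph_msg_put() from messenger.h, which wrap kref_get()/kref_put()
 * with ceph_msg_last_put() as the release function):
 *
 *	struct ceph_msg *m = ceph_msg_new(type, 128, GFP_NOFS, true);
 *
 *	if (m) {
 *		ceph_msg_get(m);	second user takes a ref
 *		...
 *		ceph_msg_put(m);	second user done
 *		ceph_msg_put(m);	last ref: ceph_msg_last_put() runs,
 *					returning m to its pool or freeing it
 *	}
 */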