1 #include <linux/ceph/ceph_debug.h> 2 3 #include <linux/crc32c.h> 4 #include <linux/ctype.h> 5 #include <linux/highmem.h> 6 #include <linux/inet.h> 7 #include <linux/kthread.h> 8 #include <linux/net.h> 9 #include <linux/slab.h> 10 #include <linux/socket.h> 11 #include <linux/string.h> 12 #include <linux/bio.h> 13 #include <linux/blkdev.h> 14 #include <linux/dns_resolver.h> 15 #include <net/tcp.h> 16 17 #include <linux/ceph/libceph.h> 18 #include <linux/ceph/messenger.h> 19 #include <linux/ceph/decode.h> 20 #include <linux/ceph/pagelist.h> 21 #include <linux/export.h> 22 23 /* 24 * Ceph uses the messenger to exchange ceph_msg messages with other 25 * hosts in the system. The messenger provides ordered and reliable 26 * delivery. We tolerate TCP disconnects by reconnecting (with 27 * exponential backoff) in the case of a fault (disconnection, bad 28 * crc, protocol error). Acks allow sent messages to be discarded by 29 * the sender. 30 */ 31 32 /* 33 * We track the state of the socket on a given connection using 34 * values defined below. The transition to a new socket state is 35 * handled by a function which verifies we aren't coming from an 36 * unexpected state. 37 * 38 * -------- 39 * | NEW* | transient initial state 40 * -------- 41 * | con_sock_state_init() 42 * v 43 * ---------- 44 * | CLOSED | initialized, but no socket (and no 45 * ---------- TCP connection) 46 * ^ \ 47 * | \ con_sock_state_connecting() 48 * | ---------------------- 49 * | \ 50 * + con_sock_state_closed() \ 51 * |+--------------------------- \ 52 * | \ \ \ 53 * | ----------- \ \ 54 * | | CLOSING | socket event; \ \ 55 * | ----------- await close \ \ 56 * | ^ \ | 57 * | | \ | 58 * | + con_sock_state_closing() \ | 59 * | / \ | | 60 * | / --------------- | | 61 * | / \ v v 62 * | / -------------- 63 * | / -----------------| CONNECTING | socket created, TCP 64 * | | / -------------- connect initiated 65 * | | | con_sock_state_connected() 66 * | | v 67 * ------------- 68 * | CONNECTED | TCP connection established 69 * ------------- 70 * 71 * State values for ceph_connection->sock_state; NEW is assumed to be 0. 
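 *
 * So, for example, a connection that is opened and then cleanly closed
 * walks
 *
 *	NEW -> CLOSED -> CONNECTING -> CONNECTED -> CLOSING -> CLOSED
 *
 * with each arrow taken by the con_sock_state_*() helper named on it in
 * the diagram above.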
72 */ 73 74 #define CON_SOCK_STATE_NEW 0 /* -> CLOSED */ 75 #define CON_SOCK_STATE_CLOSED 1 /* -> CONNECTING */ 76 #define CON_SOCK_STATE_CONNECTING 2 /* -> CONNECTED or -> CLOSING */ 77 #define CON_SOCK_STATE_CONNECTED 3 /* -> CLOSING or -> CLOSED */ 78 #define CON_SOCK_STATE_CLOSING 4 /* -> CLOSED */ 79 80 /* 81 * connection states 82 */ 83 #define CON_STATE_CLOSED 1 /* -> PREOPEN */ 84 #define CON_STATE_PREOPEN 2 /* -> CONNECTING, CLOSED */ 85 #define CON_STATE_CONNECTING 3 /* -> NEGOTIATING, CLOSED */ 86 #define CON_STATE_NEGOTIATING 4 /* -> OPEN, CLOSED */ 87 #define CON_STATE_OPEN 5 /* -> STANDBY, CLOSED */ 88 #define CON_STATE_STANDBY 6 /* -> PREOPEN, CLOSED */ 89 90 /* 91 * ceph_connection flag bits 92 */ 93 #define CON_FLAG_LOSSYTX 0 /* we can close channel or drop 94 * messages on errors */ 95 #define CON_FLAG_KEEPALIVE_PENDING 1 /* we need to send a keepalive */ 96 #define CON_FLAG_WRITE_PENDING 2 /* we have data ready to send */ 97 #define CON_FLAG_SOCK_CLOSED 3 /* socket state changed to closed */ 98 #define CON_FLAG_BACKOFF 4 /* need to retry queuing delayed work */ 99 100 /* static tag bytes (protocol control messages) */ 101 static char tag_msg = CEPH_MSGR_TAG_MSG; 102 static char tag_ack = CEPH_MSGR_TAG_ACK; 103 static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE; 104 105 #ifdef CONFIG_LOCKDEP 106 static struct lock_class_key socket_class; 107 #endif 108 109 /* 110 * When skipping (ignoring) a block of input we read it into a "skip 111 * buffer," which is this many bytes in size. 112 */ 113 #define SKIP_BUF_SIZE 1024 114 115 static void queue_con(struct ceph_connection *con); 116 static void con_work(struct work_struct *); 117 static void ceph_fault(struct ceph_connection *con); 118 119 /* 120 * Nicely render a sockaddr as a string. An array of formatted 121 * strings is used, to approximate reentrancy. 122 */ 123 #define ADDR_STR_COUNT_LOG 5 /* log2(# address strings in array) */ 124 #define ADDR_STR_COUNT (1 << ADDR_STR_COUNT_LOG) 125 #define ADDR_STR_COUNT_MASK (ADDR_STR_COUNT - 1) 126 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */ 127 128 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN]; 129 static atomic_t addr_str_seq = ATOMIC_INIT(0); 130 131 static struct page *zero_page; /* used in certain error cases */ 132 133 const char *ceph_pr_addr(const struct sockaddr_storage *ss) 134 { 135 int i; 136 char *s; 137 struct sockaddr_in *in4 = (struct sockaddr_in *) ss; 138 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss; 139 140 i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK; 141 s = addr_str[i]; 142 143 switch (ss->ss_family) { 144 case AF_INET: 145 snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%hu", &in4->sin_addr, 146 ntohs(in4->sin_port)); 147 break; 148 149 case AF_INET6: 150 snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%hu", &in6->sin6_addr, 151 ntohs(in6->sin6_port)); 152 break; 153 154 default: 155 snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %hu)", 156 ss->ss_family); 157 } 158 159 return s; 160 } 161 EXPORT_SYMBOL(ceph_pr_addr); 162 163 static void encode_my_addr(struct ceph_messenger *msgr) 164 { 165 memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr)); 166 ceph_encode_addr(&msgr->my_enc_addr); 167 } 168 169 /* 170 * work queue for all reading and writing to/from the socket. 
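 * All socket I/O for a connection happens from con_work(), queued on
 * this workqueue via queue_con(); see those functions below.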
 */
static struct workqueue_struct *ceph_msgr_wq;

static void _ceph_msgr_exit(void)
{
	if (ceph_msgr_wq) {
		destroy_workqueue(ceph_msgr_wq);
		ceph_msgr_wq = NULL;
	}

	BUG_ON(zero_page == NULL);
	kunmap(zero_page);
	page_cache_release(zero_page);
	zero_page = NULL;
}

int ceph_msgr_init(void)
{
	BUG_ON(zero_page != NULL);
	zero_page = ZERO_PAGE(0);
	page_cache_get(zero_page);

	ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_NON_REENTRANT, 0);
	if (ceph_msgr_wq)
		return 0;

	pr_err("msgr_init failed to create workqueue\n");
	_ceph_msgr_exit();

	return -ENOMEM;
}
EXPORT_SYMBOL(ceph_msgr_init);

void ceph_msgr_exit(void)
{
	BUG_ON(ceph_msgr_wq == NULL);

	_ceph_msgr_exit();
}
EXPORT_SYMBOL(ceph_msgr_exit);

void ceph_msgr_flush(void)
{
	flush_workqueue(ceph_msgr_wq);
}
EXPORT_SYMBOL(ceph_msgr_flush);

/* Connection socket state transition functions */

static void con_sock_state_init(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
	if (WARN_ON(old_state != CON_SOCK_STATE_NEW))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSED);
}

static void con_sock_state_connecting(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING);
	if (WARN_ON(old_state != CON_SOCK_STATE_CLOSED))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CONNECTING);
}

static void con_sock_state_connected(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CONNECTED);
}

static void con_sock_state_closing(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING &&
		    old_state != CON_SOCK_STATE_CONNECTED &&
		    old_state != CON_SOCK_STATE_CLOSING))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSING);
}

static void con_sock_state_closed(struct ceph_connection *con)
{
	int old_state;

	old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
	if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED &&
		    old_state != CON_SOCK_STATE_CLOSING &&
		    old_state != CON_SOCK_STATE_CONNECTING &&
		    old_state != CON_SOCK_STATE_CLOSED))
		printk("%s: unexpected old state %d\n", __func__, old_state);
	dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
	     CON_SOCK_STATE_CLOSED);
}

/*
 * socket callback functions
 */

/* data available on socket, or listen socket received a connect */
static void ceph_sock_data_ready(struct sock *sk, int count_unused)
{
	struct ceph_connection *con = sk->sk_user_data;
	if (atomic_read(&con->msgr->stopping)) {
		return;
	}

	if (sk->sk_state !=
TCP_CLOSE_WAIT) { 293 dout("%s on %p state = %lu, queueing work\n", __func__, 294 con, con->state); 295 queue_con(con); 296 } 297 } 298 299 /* socket has buffer space for writing */ 300 static void ceph_sock_write_space(struct sock *sk) 301 { 302 struct ceph_connection *con = sk->sk_user_data; 303 304 /* only queue to workqueue if there is data we want to write, 305 * and there is sufficient space in the socket buffer to accept 306 * more data. clear SOCK_NOSPACE so that ceph_sock_write_space() 307 * doesn't get called again until try_write() fills the socket 308 * buffer. See net/ipv4/tcp_input.c:tcp_check_space() 309 * and net/core/stream.c:sk_stream_write_space(). 310 */ 311 if (test_bit(CON_FLAG_WRITE_PENDING, &con->flags)) { 312 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) { 313 dout("%s %p queueing write work\n", __func__, con); 314 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 315 queue_con(con); 316 } 317 } else { 318 dout("%s %p nothing to write\n", __func__, con); 319 } 320 } 321 322 /* socket's state has changed */ 323 static void ceph_sock_state_change(struct sock *sk) 324 { 325 struct ceph_connection *con = sk->sk_user_data; 326 327 dout("%s %p state = %lu sk_state = %u\n", __func__, 328 con, con->state, sk->sk_state); 329 330 switch (sk->sk_state) { 331 case TCP_CLOSE: 332 dout("%s TCP_CLOSE\n", __func__); 333 case TCP_CLOSE_WAIT: 334 dout("%s TCP_CLOSE_WAIT\n", __func__); 335 con_sock_state_closing(con); 336 set_bit(CON_FLAG_SOCK_CLOSED, &con->flags); 337 queue_con(con); 338 break; 339 case TCP_ESTABLISHED: 340 dout("%s TCP_ESTABLISHED\n", __func__); 341 con_sock_state_connected(con); 342 queue_con(con); 343 break; 344 default: /* Everything else is uninteresting */ 345 break; 346 } 347 } 348 349 /* 350 * set up socket callbacks 351 */ 352 static void set_sock_callbacks(struct socket *sock, 353 struct ceph_connection *con) 354 { 355 struct sock *sk = sock->sk; 356 sk->sk_user_data = con; 357 sk->sk_data_ready = ceph_sock_data_ready; 358 sk->sk_write_space = ceph_sock_write_space; 359 sk->sk_state_change = ceph_sock_state_change; 360 } 361 362 363 /* 364 * socket helpers 365 */ 366 367 /* 368 * initiate connection to a remote socket. 
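 *
 * The connect is non-blocking: -EINPROGRESS from ->connect() is the
 * normal case, and completion is reported asynchronously when
 * ceph_sock_state_change() sees TCP_ESTABLISHED and queues the
 * connection for work.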
369 */ 370 static int ceph_tcp_connect(struct ceph_connection *con) 371 { 372 struct sockaddr_storage *paddr = &con->peer_addr.in_addr; 373 struct socket *sock; 374 int ret; 375 376 BUG_ON(con->sock); 377 ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM, 378 IPPROTO_TCP, &sock); 379 if (ret) 380 return ret; 381 sock->sk->sk_allocation = GFP_NOFS; 382 383 #ifdef CONFIG_LOCKDEP 384 lockdep_set_class(&sock->sk->sk_lock, &socket_class); 385 #endif 386 387 set_sock_callbacks(sock, con); 388 389 dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr)); 390 391 con_sock_state_connecting(con); 392 ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr), 393 O_NONBLOCK); 394 if (ret == -EINPROGRESS) { 395 dout("connect %s EINPROGRESS sk_state = %u\n", 396 ceph_pr_addr(&con->peer_addr.in_addr), 397 sock->sk->sk_state); 398 } else if (ret < 0) { 399 pr_err("connect %s error %d\n", 400 ceph_pr_addr(&con->peer_addr.in_addr), ret); 401 sock_release(sock); 402 con->error_msg = "connect error"; 403 404 return ret; 405 } 406 con->sock = sock; 407 return 0; 408 } 409 410 static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len) 411 { 412 struct kvec iov = {buf, len}; 413 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; 414 int r; 415 416 r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags); 417 if (r == -EAGAIN) 418 r = 0; 419 return r; 420 } 421 422 /* 423 * write something. @more is true if caller will be sending more data 424 * shortly. 425 */ 426 static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov, 427 size_t kvlen, size_t len, int more) 428 { 429 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; 430 int r; 431 432 if (more) 433 msg.msg_flags |= MSG_MORE; 434 else 435 msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */ 436 437 r = kernel_sendmsg(sock, &msg, iov, kvlen, len); 438 if (r == -EAGAIN) 439 r = 0; 440 return r; 441 } 442 443 static int ceph_tcp_sendpage(struct socket *sock, struct page *page, 444 int offset, size_t size, int more) 445 { 446 int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR); 447 int ret; 448 449 ret = kernel_sendpage(sock, page, offset, size, flags); 450 if (ret == -EAGAIN) 451 ret = 0; 452 453 return ret; 454 } 455 456 457 /* 458 * Shutdown/close the socket for the given connection. 459 */ 460 static int con_close_socket(struct ceph_connection *con) 461 { 462 int rc = 0; 463 464 dout("con_close_socket on %p sock %p\n", con, con->sock); 465 if (con->sock) { 466 rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR); 467 sock_release(con->sock); 468 con->sock = NULL; 469 } 470 471 /* 472 * Forcibly clear the SOCK_CLOSED flag. It gets set 473 * independent of the connection mutex, and we could have 474 * received a socket close event before we had the chance to 475 * shut the socket down. 476 */ 477 clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags); 478 479 con_sock_state_closed(con); 480 return rc; 481 } 482 483 /* 484 * Reset a connection. Discard all incoming and outgoing messages 485 * and clear *_seq state. 
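 *
 * This covers both out_queue (not yet sent) and out_sent (sent but
 * unacked) messages, plus any partially received in_msg; the peer is
 * assumed to have lost all session state.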
 */
static void ceph_msg_remove(struct ceph_msg *msg)
{
	list_del_init(&msg->list_head);
	BUG_ON(msg->con == NULL);
	msg->con->ops->put(msg->con);
	msg->con = NULL;

	ceph_msg_put(msg);
}
static void ceph_msg_remove_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
							list_head);
		ceph_msg_remove(msg);
	}
}

static void reset_connection(struct ceph_connection *con)
{
	/* reset connection, out_queue, msg_ and connect_seq */
	/* discard existing out_queue and msg_seq */
	ceph_msg_remove_list(&con->out_queue);
	ceph_msg_remove_list(&con->out_sent);

	if (con->in_msg) {
		BUG_ON(con->in_msg->con != con);
		con->in_msg->con = NULL;
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
		con->ops->put(con);
	}

	con->connect_seq = 0;
	con->out_seq = 0;
	if (con->out_msg) {
		ceph_msg_put(con->out_msg);
		con->out_msg = NULL;
	}
	con->in_seq = 0;
	con->in_seq_acked = 0;
}

/*
 * mark a peer down.  drop any open connections.
 */
void ceph_con_close(struct ceph_connection *con)
{
	mutex_lock(&con->mutex);
	dout("con_close %p peer %s\n", con,
	     ceph_pr_addr(&con->peer_addr.in_addr));
	con->state = CON_STATE_CLOSED;

	clear_bit(CON_FLAG_LOSSYTX, &con->flags); /* so we retry next connect */
	clear_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags);
	clear_bit(CON_FLAG_WRITE_PENDING, &con->flags);
	clear_bit(CON_FLAG_BACKOFF, &con->flags);

	reset_connection(con);
	con->peer_global_seq = 0;
	cancel_delayed_work(&con->work);
	con_close_socket(con);
	mutex_unlock(&con->mutex);
}
EXPORT_SYMBOL(ceph_con_close);

/*
 * Reopen a closed connection, with a new peer address.
 */
void ceph_con_open(struct ceph_connection *con,
		   __u8 entity_type, __u64 entity_num,
		   struct ceph_entity_addr *addr)
{
	mutex_lock(&con->mutex);
	dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));

	BUG_ON(con->state != CON_STATE_CLOSED);
	con->state = CON_STATE_PREOPEN;

	con->peer_name.type = (__u8) entity_type;
	con->peer_name.num = cpu_to_le64(entity_num);

	memcpy(&con->peer_addr, addr, sizeof(*addr));
	con->delay = 0;      /* reset backoff memory */
	mutex_unlock(&con->mutex);
	queue_con(con);
}
EXPORT_SYMBOL(ceph_con_open);

/*
 * return true if this connection ever successfully opened
 */
bool ceph_con_opened(struct ceph_connection *con)
{
	return con->connect_seq > 0;
}

/*
 * initialize a new connection.
 */
void ceph_con_init(struct ceph_connection *con, void *private,
		   const struct ceph_connection_operations *ops,
		   struct ceph_messenger *msgr)
{
	dout("con_init %p\n", con);
	memset(con, 0, sizeof(*con));
	con->private = private;
	con->ops = ops;
	con->msgr = msgr;

	con_sock_state_init(con);

	mutex_init(&con->mutex);
	INIT_LIST_HEAD(&con->out_queue);
	INIT_LIST_HEAD(&con->out_sent);
	INIT_DELAYED_WORK(&con->work, con_work);

	con->state = CON_STATE_CLOSED;
}
EXPORT_SYMBOL(ceph_con_init);


/*
 * We maintain a global counter to order connection attempts.  Get
 * a unique seq greater than @gt.
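 *
 * For example (illustrative only, peer_gseq is a placeholder name), a
 * fresh connect just takes the next value, while a
 * CEPH_MSGR_TAG_RETRY_GLOBAL reply feeds the peer's global_seq back in
 * so the retry outbids it:
 *
 *	gseq = get_global_seq(con->msgr, 0);		returns next seq
 *	gseq = get_global_seq(con->msgr, peer_gseq);	returns > peer_gseq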
613 */ 614 static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt) 615 { 616 u32 ret; 617 618 spin_lock(&msgr->global_seq_lock); 619 if (msgr->global_seq < gt) 620 msgr->global_seq = gt; 621 ret = ++msgr->global_seq; 622 spin_unlock(&msgr->global_seq_lock); 623 return ret; 624 } 625 626 static void con_out_kvec_reset(struct ceph_connection *con) 627 { 628 con->out_kvec_left = 0; 629 con->out_kvec_bytes = 0; 630 con->out_kvec_cur = &con->out_kvec[0]; 631 } 632 633 static void con_out_kvec_add(struct ceph_connection *con, 634 size_t size, void *data) 635 { 636 int index; 637 638 index = con->out_kvec_left; 639 BUG_ON(index >= ARRAY_SIZE(con->out_kvec)); 640 641 con->out_kvec[index].iov_len = size; 642 con->out_kvec[index].iov_base = data; 643 con->out_kvec_left++; 644 con->out_kvec_bytes += size; 645 } 646 647 #ifdef CONFIG_BLOCK 648 static void init_bio_iter(struct bio *bio, struct bio **iter, int *seg) 649 { 650 if (!bio) { 651 *iter = NULL; 652 *seg = 0; 653 return; 654 } 655 *iter = bio; 656 *seg = bio->bi_idx; 657 } 658 659 static void iter_bio_next(struct bio **bio_iter, int *seg) 660 { 661 if (*bio_iter == NULL) 662 return; 663 664 BUG_ON(*seg >= (*bio_iter)->bi_vcnt); 665 666 (*seg)++; 667 if (*seg == (*bio_iter)->bi_vcnt) 668 init_bio_iter((*bio_iter)->bi_next, bio_iter, seg); 669 } 670 #endif 671 672 static void prepare_write_message_data(struct ceph_connection *con) 673 { 674 struct ceph_msg *msg = con->out_msg; 675 676 BUG_ON(!msg); 677 BUG_ON(!msg->hdr.data_len); 678 679 /* initialize page iterator */ 680 con->out_msg_pos.page = 0; 681 if (msg->pages) 682 con->out_msg_pos.page_pos = msg->page_alignment; 683 else 684 con->out_msg_pos.page_pos = 0; 685 #ifdef CONFIG_BLOCK 686 if (msg->bio) 687 init_bio_iter(msg->bio, &msg->bio_iter, &msg->bio_seg); 688 #endif 689 con->out_msg_pos.data_pos = 0; 690 con->out_msg_pos.did_page_crc = false; 691 con->out_more = 1; /* data + footer will follow */ 692 } 693 694 /* 695 * Prepare footer for currently outgoing message, and finish things 696 * off. Assumes out_kvec* are already valid.. we just add on to the end. 697 */ 698 static void prepare_write_message_footer(struct ceph_connection *con) 699 { 700 struct ceph_msg *m = con->out_msg; 701 int v = con->out_kvec_left; 702 703 m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE; 704 705 dout("prepare_write_message_footer %p\n", con); 706 con->out_kvec_is_msg = true; 707 con->out_kvec[v].iov_base = &m->footer; 708 con->out_kvec[v].iov_len = sizeof(m->footer); 709 con->out_kvec_bytes += sizeof(m->footer); 710 con->out_kvec_left++; 711 con->out_more = m->more_to_follow; 712 con->out_msg_done = true; 713 } 714 715 /* 716 * Prepare headers for the next outgoing message. 717 */ 718 static void prepare_write_message(struct ceph_connection *con) 719 { 720 struct ceph_msg *m; 721 u32 crc; 722 723 con_out_kvec_reset(con); 724 con->out_kvec_is_msg = true; 725 con->out_msg_done = false; 726 727 /* Sneak an ack in there first? If we can get it into the same 728 * TCP packet that's a good thing. 
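	 * The outgoing kvec then starts out as (sketch):
	 *
	 *	[TAG_ACK][le64 ack_seq][TAG_MSG][header][front][middle...]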
	 */
	if (con->in_seq > con->in_seq_acked) {
		con->in_seq_acked = con->in_seq;
		con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
		con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
		con_out_kvec_add(con, sizeof (con->out_temp_ack),
			&con->out_temp_ack);
	}

	BUG_ON(list_empty(&con->out_queue));
	m = list_first_entry(&con->out_queue, struct ceph_msg, list_head);
	con->out_msg = m;
	BUG_ON(m->con != con);

	/* put message on sent list */
	ceph_msg_get(m);
	list_move_tail(&m->list_head, &con->out_sent);

	/*
	 * only assign outgoing seq # if we haven't sent this message
	 * yet.  if it is requeued, resend with its original seq.
	 */
	if (m->needs_out_seq) {
		m->hdr.seq = cpu_to_le64(++con->out_seq);
		m->needs_out_seq = false;
	}
#ifdef CONFIG_BLOCK
	else
		m->bio_iter = NULL;
#endif

	dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n",
	     m, con->out_seq, le16_to_cpu(m->hdr.type),
	     le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
	     le32_to_cpu(m->hdr.data_len),
	     m->nr_pages);
	BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len);

	/* tag + hdr + front + middle */
	con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
	con_out_kvec_add(con, sizeof (m->hdr), &m->hdr);
	con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);

	if (m->middle)
		con_out_kvec_add(con, m->middle->vec.iov_len,
			m->middle->vec.iov_base);

	/* fill in crc (except data pages), footer */
	crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
	con->out_msg->hdr.crc = cpu_to_le32(crc);
	con->out_msg->footer.flags = 0;

	crc = crc32c(0, m->front.iov_base, m->front.iov_len);
	con->out_msg->footer.front_crc = cpu_to_le32(crc);
	if (m->middle) {
		crc = crc32c(0, m->middle->vec.iov_base,
				m->middle->vec.iov_len);
		con->out_msg->footer.middle_crc = cpu_to_le32(crc);
	} else
		con->out_msg->footer.middle_crc = 0;
	dout("%s front_crc %u middle_crc %u\n", __func__,
	     le32_to_cpu(con->out_msg->footer.front_crc),
	     le32_to_cpu(con->out_msg->footer.middle_crc));

	/* is there a data payload? */
	con->out_msg->footer.data_crc = 0;
	if (m->hdr.data_len)
		prepare_write_message_data(con);
	else
		/* no, queue up footer too and be done */
		prepare_write_message_footer(con);

	set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
}

/*
 * Prepare an ack.
 */
static void prepare_write_ack(struct ceph_connection *con)
{
	dout("prepare_write_ack %p %llu -> %llu\n", con,
	     con->in_seq_acked, con->in_seq);
	con->in_seq_acked = con->in_seq;

	con_out_kvec_reset(con);

	con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);

	con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
	con_out_kvec_add(con, sizeof (con->out_temp_ack),
			 &con->out_temp_ack);

	con->out_more = 1;  /* more will follow.. eventually.. */
	set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
}

/*
 * Prepare to write keepalive byte.
 */
static void prepare_write_keepalive(struct ceph_connection *con)
{
	dout("prepare_write_keepalive %p\n", con);
	con_out_kvec_reset(con);
	con_out_kvec_add(con, sizeof (tag_keepalive), &tag_keepalive);
	set_bit(CON_FLAG_WRITE_PENDING, &con->flags);
}

/*
 * Connection negotiation.
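 *
 * Roughly: we send our banner and encoded address, then a
 * ceph_msg_connect (plus any authorizer bytes).  The peer answers with
 * its own banner and addresses, then a ceph_msg_connect_reply whose tag
 * (READY, RETRY_*, BADAUTHORIZER, ...) is handled in process_connect().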
837 */ 838 839 static struct ceph_auth_handshake *get_connect_authorizer(struct ceph_connection *con, 840 int *auth_proto) 841 { 842 struct ceph_auth_handshake *auth; 843 844 if (!con->ops->get_authorizer) { 845 con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN; 846 con->out_connect.authorizer_len = 0; 847 return NULL; 848 } 849 850 /* Can't hold the mutex while getting authorizer */ 851 mutex_unlock(&con->mutex); 852 auth = con->ops->get_authorizer(con, auth_proto, con->auth_retry); 853 mutex_lock(&con->mutex); 854 855 if (IS_ERR(auth)) 856 return auth; 857 if (con->state != CON_STATE_NEGOTIATING) 858 return ERR_PTR(-EAGAIN); 859 860 con->auth_reply_buf = auth->authorizer_reply_buf; 861 con->auth_reply_buf_len = auth->authorizer_reply_buf_len; 862 return auth; 863 } 864 865 /* 866 * We connected to a peer and are saying hello. 867 */ 868 static void prepare_write_banner(struct ceph_connection *con) 869 { 870 con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER); 871 con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr), 872 &con->msgr->my_enc_addr); 873 874 con->out_more = 0; 875 set_bit(CON_FLAG_WRITE_PENDING, &con->flags); 876 } 877 878 static int prepare_write_connect(struct ceph_connection *con) 879 { 880 unsigned int global_seq = get_global_seq(con->msgr, 0); 881 int proto; 882 int auth_proto; 883 struct ceph_auth_handshake *auth; 884 885 switch (con->peer_name.type) { 886 case CEPH_ENTITY_TYPE_MON: 887 proto = CEPH_MONC_PROTOCOL; 888 break; 889 case CEPH_ENTITY_TYPE_OSD: 890 proto = CEPH_OSDC_PROTOCOL; 891 break; 892 case CEPH_ENTITY_TYPE_MDS: 893 proto = CEPH_MDSC_PROTOCOL; 894 break; 895 default: 896 BUG(); 897 } 898 899 dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con, 900 con->connect_seq, global_seq, proto); 901 902 con->out_connect.features = cpu_to_le64(con->msgr->supported_features); 903 con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT); 904 con->out_connect.connect_seq = cpu_to_le32(con->connect_seq); 905 con->out_connect.global_seq = cpu_to_le32(global_seq); 906 con->out_connect.protocol_version = cpu_to_le32(proto); 907 con->out_connect.flags = 0; 908 909 auth_proto = CEPH_AUTH_UNKNOWN; 910 auth = get_connect_authorizer(con, &auth_proto); 911 if (IS_ERR(auth)) 912 return PTR_ERR(auth); 913 914 con->out_connect.authorizer_protocol = cpu_to_le32(auth_proto); 915 con->out_connect.authorizer_len = auth ? 916 cpu_to_le32(auth->authorizer_buf_len) : 0; 917 918 con_out_kvec_add(con, sizeof (con->out_connect), 919 &con->out_connect); 920 if (auth && auth->authorizer_buf_len) 921 con_out_kvec_add(con, auth->authorizer_buf_len, 922 auth->authorizer_buf); 923 924 con->out_more = 0; 925 set_bit(CON_FLAG_WRITE_PENDING, &con->flags); 926 927 return 0; 928 } 929 930 /* 931 * write as much of pending kvecs to the socket as we can. 
932 * 1 -> done 933 * 0 -> socket full, but more to do 934 * <0 -> error 935 */ 936 static int write_partial_kvec(struct ceph_connection *con) 937 { 938 int ret; 939 940 dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes); 941 while (con->out_kvec_bytes > 0) { 942 ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur, 943 con->out_kvec_left, con->out_kvec_bytes, 944 con->out_more); 945 if (ret <= 0) 946 goto out; 947 con->out_kvec_bytes -= ret; 948 if (con->out_kvec_bytes == 0) 949 break; /* done */ 950 951 /* account for full iov entries consumed */ 952 while (ret >= con->out_kvec_cur->iov_len) { 953 BUG_ON(!con->out_kvec_left); 954 ret -= con->out_kvec_cur->iov_len; 955 con->out_kvec_cur++; 956 con->out_kvec_left--; 957 } 958 /* and for a partially-consumed entry */ 959 if (ret) { 960 con->out_kvec_cur->iov_len -= ret; 961 con->out_kvec_cur->iov_base += ret; 962 } 963 } 964 con->out_kvec_left = 0; 965 con->out_kvec_is_msg = false; 966 ret = 1; 967 out: 968 dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con, 969 con->out_kvec_bytes, con->out_kvec_left, ret); 970 return ret; /* done! */ 971 } 972 973 static void out_msg_pos_next(struct ceph_connection *con, struct page *page, 974 size_t len, size_t sent, bool in_trail) 975 { 976 struct ceph_msg *msg = con->out_msg; 977 978 BUG_ON(!msg); 979 BUG_ON(!sent); 980 981 con->out_msg_pos.data_pos += sent; 982 con->out_msg_pos.page_pos += sent; 983 if (sent < len) 984 return; 985 986 BUG_ON(sent != len); 987 con->out_msg_pos.page_pos = 0; 988 con->out_msg_pos.page++; 989 con->out_msg_pos.did_page_crc = false; 990 if (in_trail) 991 list_move_tail(&page->lru, 992 &msg->trail->head); 993 else if (msg->pagelist) 994 list_move_tail(&page->lru, 995 &msg->pagelist->head); 996 #ifdef CONFIG_BLOCK 997 else if (msg->bio) 998 iter_bio_next(&msg->bio_iter, &msg->bio_seg); 999 #endif 1000 } 1001 1002 /* 1003 * Write as much message data payload as we can. If we finish, queue 1004 * up the footer. 1005 * 1 -> done, footer is now queued in out_kvec[]. 1006 * 0 -> socket full, but more to do 1007 * <0 -> error 1008 */ 1009 static int write_partial_msg_pages(struct ceph_connection *con) 1010 { 1011 struct ceph_msg *msg = con->out_msg; 1012 unsigned int data_len = le32_to_cpu(msg->hdr.data_len); 1013 size_t len; 1014 bool do_datacrc = !con->msgr->nocrc; 1015 int ret; 1016 int total_max_write; 1017 bool in_trail = false; 1018 const size_t trail_len = (msg->trail ? msg->trail->length : 0); 1019 const size_t trail_off = data_len - trail_len; 1020 1021 dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n", 1022 con, msg, con->out_msg_pos.page, msg->nr_pages, 1023 con->out_msg_pos.page_pos); 1024 1025 /* 1026 * Iterate through each page that contains data to be 1027 * written, and send as much as possible for each. 1028 * 1029 * If we are calculating the data crc (the default), we will 1030 * need to map the page. If we have no pages, they have 1031 * been revoked, so use the zero page. 
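	 *
	 * The crc for a page is computed once, before the first send
	 * attempt for that page (did_page_crc); a short send just
	 * resumes at page_pos without re-crcing.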
1032 */ 1033 while (data_len > con->out_msg_pos.data_pos) { 1034 struct page *page = NULL; 1035 int max_write = PAGE_SIZE; 1036 int bio_offset = 0; 1037 1038 in_trail = in_trail || con->out_msg_pos.data_pos >= trail_off; 1039 if (!in_trail) 1040 total_max_write = trail_off - con->out_msg_pos.data_pos; 1041 1042 if (in_trail) { 1043 total_max_write = data_len - con->out_msg_pos.data_pos; 1044 1045 page = list_first_entry(&msg->trail->head, 1046 struct page, lru); 1047 } else if (msg->pages) { 1048 page = msg->pages[con->out_msg_pos.page]; 1049 } else if (msg->pagelist) { 1050 page = list_first_entry(&msg->pagelist->head, 1051 struct page, lru); 1052 #ifdef CONFIG_BLOCK 1053 } else if (msg->bio) { 1054 struct bio_vec *bv; 1055 1056 bv = bio_iovec_idx(msg->bio_iter, msg->bio_seg); 1057 page = bv->bv_page; 1058 bio_offset = bv->bv_offset; 1059 max_write = bv->bv_len; 1060 #endif 1061 } else { 1062 page = zero_page; 1063 } 1064 len = min_t(int, max_write - con->out_msg_pos.page_pos, 1065 total_max_write); 1066 1067 if (do_datacrc && !con->out_msg_pos.did_page_crc) { 1068 void *base; 1069 u32 crc = le32_to_cpu(msg->footer.data_crc); 1070 char *kaddr; 1071 1072 kaddr = kmap(page); 1073 BUG_ON(kaddr == NULL); 1074 base = kaddr + con->out_msg_pos.page_pos + bio_offset; 1075 crc = crc32c(crc, base, len); 1076 kunmap(page); 1077 msg->footer.data_crc = cpu_to_le32(crc); 1078 con->out_msg_pos.did_page_crc = true; 1079 } 1080 ret = ceph_tcp_sendpage(con->sock, page, 1081 con->out_msg_pos.page_pos + bio_offset, 1082 len, 1); 1083 if (ret <= 0) 1084 goto out; 1085 1086 out_msg_pos_next(con, page, len, (size_t) ret, in_trail); 1087 } 1088 1089 dout("write_partial_msg_pages %p msg %p done\n", con, msg); 1090 1091 /* prepare and queue up footer, too */ 1092 if (!do_datacrc) 1093 msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC; 1094 con_out_kvec_reset(con); 1095 prepare_write_message_footer(con); 1096 ret = 1; 1097 out: 1098 return ret; 1099 } 1100 1101 /* 1102 * write some zeros 1103 */ 1104 static int write_partial_skip(struct ceph_connection *con) 1105 { 1106 int ret; 1107 1108 while (con->out_skip > 0) { 1109 size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE); 1110 1111 ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, 1); 1112 if (ret <= 0) 1113 goto out; 1114 con->out_skip -= ret; 1115 } 1116 ret = 1; 1117 out: 1118 return ret; 1119 } 1120 1121 /* 1122 * Prepare to read connection handshake, or an ack. 1123 */ 1124 static void prepare_read_banner(struct ceph_connection *con) 1125 { 1126 dout("prepare_read_banner %p\n", con); 1127 con->in_base_pos = 0; 1128 } 1129 1130 static void prepare_read_connect(struct ceph_connection *con) 1131 { 1132 dout("prepare_read_connect %p\n", con); 1133 con->in_base_pos = 0; 1134 } 1135 1136 static void prepare_read_ack(struct ceph_connection *con) 1137 { 1138 dout("prepare_read_ack %p\n", con); 1139 con->in_base_pos = 0; 1140 } 1141 1142 static void prepare_read_tag(struct ceph_connection *con) 1143 { 1144 dout("prepare_read_tag %p\n", con); 1145 con->in_base_pos = 0; 1146 con->in_tag = CEPH_MSGR_TAG_READY; 1147 } 1148 1149 /* 1150 * Prepare to read a message. 
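 *
 * Reading is resumable: in_base_pos (and the section iov_lens) record
 * how far we got, so the read_partial*() helpers can pick up where the
 * last short read left off.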
1151 */ 1152 static int prepare_read_message(struct ceph_connection *con) 1153 { 1154 dout("prepare_read_message %p\n", con); 1155 BUG_ON(con->in_msg != NULL); 1156 con->in_base_pos = 0; 1157 con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0; 1158 return 0; 1159 } 1160 1161 1162 static int read_partial(struct ceph_connection *con, 1163 int end, int size, void *object) 1164 { 1165 while (con->in_base_pos < end) { 1166 int left = end - con->in_base_pos; 1167 int have = size - left; 1168 int ret = ceph_tcp_recvmsg(con->sock, object + have, left); 1169 if (ret <= 0) 1170 return ret; 1171 con->in_base_pos += ret; 1172 } 1173 return 1; 1174 } 1175 1176 1177 /* 1178 * Read all or part of the connect-side handshake on a new connection 1179 */ 1180 static int read_partial_banner(struct ceph_connection *con) 1181 { 1182 int size; 1183 int end; 1184 int ret; 1185 1186 dout("read_partial_banner %p at %d\n", con, con->in_base_pos); 1187 1188 /* peer's banner */ 1189 size = strlen(CEPH_BANNER); 1190 end = size; 1191 ret = read_partial(con, end, size, con->in_banner); 1192 if (ret <= 0) 1193 goto out; 1194 1195 size = sizeof (con->actual_peer_addr); 1196 end += size; 1197 ret = read_partial(con, end, size, &con->actual_peer_addr); 1198 if (ret <= 0) 1199 goto out; 1200 1201 size = sizeof (con->peer_addr_for_me); 1202 end += size; 1203 ret = read_partial(con, end, size, &con->peer_addr_for_me); 1204 if (ret <= 0) 1205 goto out; 1206 1207 out: 1208 return ret; 1209 } 1210 1211 static int read_partial_connect(struct ceph_connection *con) 1212 { 1213 int size; 1214 int end; 1215 int ret; 1216 1217 dout("read_partial_connect %p at %d\n", con, con->in_base_pos); 1218 1219 size = sizeof (con->in_reply); 1220 end = size; 1221 ret = read_partial(con, end, size, &con->in_reply); 1222 if (ret <= 0) 1223 goto out; 1224 1225 size = le32_to_cpu(con->in_reply.authorizer_len); 1226 end += size; 1227 ret = read_partial(con, end, size, con->auth_reply_buf); 1228 if (ret <= 0) 1229 goto out; 1230 1231 dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n", 1232 con, (int)con->in_reply.tag, 1233 le32_to_cpu(con->in_reply.connect_seq), 1234 le32_to_cpu(con->in_reply.global_seq)); 1235 out: 1236 return ret; 1237 1238 } 1239 1240 /* 1241 * Verify the hello banner looks okay. 
1242 */ 1243 static int verify_hello(struct ceph_connection *con) 1244 { 1245 if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) { 1246 pr_err("connect to %s got bad banner\n", 1247 ceph_pr_addr(&con->peer_addr.in_addr)); 1248 con->error_msg = "protocol error, bad banner"; 1249 return -1; 1250 } 1251 return 0; 1252 } 1253 1254 static bool addr_is_blank(struct sockaddr_storage *ss) 1255 { 1256 switch (ss->ss_family) { 1257 case AF_INET: 1258 return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0; 1259 case AF_INET6: 1260 return 1261 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 && 1262 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 && 1263 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 && 1264 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0; 1265 } 1266 return false; 1267 } 1268 1269 static int addr_port(struct sockaddr_storage *ss) 1270 { 1271 switch (ss->ss_family) { 1272 case AF_INET: 1273 return ntohs(((struct sockaddr_in *)ss)->sin_port); 1274 case AF_INET6: 1275 return ntohs(((struct sockaddr_in6 *)ss)->sin6_port); 1276 } 1277 return 0; 1278 } 1279 1280 static void addr_set_port(struct sockaddr_storage *ss, int p) 1281 { 1282 switch (ss->ss_family) { 1283 case AF_INET: 1284 ((struct sockaddr_in *)ss)->sin_port = htons(p); 1285 break; 1286 case AF_INET6: 1287 ((struct sockaddr_in6 *)ss)->sin6_port = htons(p); 1288 break; 1289 } 1290 } 1291 1292 /* 1293 * Unlike other *_pton function semantics, zero indicates success. 1294 */ 1295 static int ceph_pton(const char *str, size_t len, struct sockaddr_storage *ss, 1296 char delim, const char **ipend) 1297 { 1298 struct sockaddr_in *in4 = (struct sockaddr_in *) ss; 1299 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss; 1300 1301 memset(ss, 0, sizeof(*ss)); 1302 1303 if (in4_pton(str, len, (u8 *)&in4->sin_addr.s_addr, delim, ipend)) { 1304 ss->ss_family = AF_INET; 1305 return 0; 1306 } 1307 1308 if (in6_pton(str, len, (u8 *)&in6->sin6_addr.s6_addr, delim, ipend)) { 1309 ss->ss_family = AF_INET6; 1310 return 0; 1311 } 1312 1313 return -EINVAL; 1314 } 1315 1316 /* 1317 * Extract hostname string and resolve using kernel DNS facility. 1318 */ 1319 #ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER 1320 static int ceph_dns_resolve_name(const char *name, size_t namelen, 1321 struct sockaddr_storage *ss, char delim, const char **ipend) 1322 { 1323 const char *end, *delim_p; 1324 char *colon_p, *ip_addr = NULL; 1325 int ip_len, ret; 1326 1327 /* 1328 * The end of the hostname occurs immediately preceding the delimiter or 1329 * the port marker (':') where the delimiter takes precedence. 1330 */ 1331 delim_p = memchr(name, delim, namelen); 1332 colon_p = memchr(name, ':', namelen); 1333 1334 if (delim_p && colon_p) 1335 end = delim_p < colon_p ? delim_p : colon_p; 1336 else if (!delim_p && colon_p) 1337 end = colon_p; 1338 else { 1339 end = delim_p; 1340 if (!end) /* case: hostname:/ */ 1341 end = name + namelen; 1342 } 1343 1344 if (end <= name) 1345 return -EINVAL; 1346 1347 /* do dns_resolve upcall */ 1348 ip_len = dns_query(NULL, name, end - name, NULL, &ip_addr, NULL); 1349 if (ip_len > 0) 1350 ret = ceph_pton(ip_addr, ip_len, ss, -1, NULL); 1351 else 1352 ret = -ESRCH; 1353 1354 kfree(ip_addr); 1355 1356 *ipend = end; 1357 1358 pr_info("resolve '%.*s' (ret=%d): %s\n", (int)(end - name), name, 1359 ret, ret ? 
"failed" : ceph_pr_addr(ss)); 1360 1361 return ret; 1362 } 1363 #else 1364 static inline int ceph_dns_resolve_name(const char *name, size_t namelen, 1365 struct sockaddr_storage *ss, char delim, const char **ipend) 1366 { 1367 return -EINVAL; 1368 } 1369 #endif 1370 1371 /* 1372 * Parse a server name (IP or hostname). If a valid IP address is not found 1373 * then try to extract a hostname to resolve using userspace DNS upcall. 1374 */ 1375 static int ceph_parse_server_name(const char *name, size_t namelen, 1376 struct sockaddr_storage *ss, char delim, const char **ipend) 1377 { 1378 int ret; 1379 1380 ret = ceph_pton(name, namelen, ss, delim, ipend); 1381 if (ret) 1382 ret = ceph_dns_resolve_name(name, namelen, ss, delim, ipend); 1383 1384 return ret; 1385 } 1386 1387 /* 1388 * Parse an ip[:port] list into an addr array. Use the default 1389 * monitor port if a port isn't specified. 1390 */ 1391 int ceph_parse_ips(const char *c, const char *end, 1392 struct ceph_entity_addr *addr, 1393 int max_count, int *count) 1394 { 1395 int i, ret = -EINVAL; 1396 const char *p = c; 1397 1398 dout("parse_ips on '%.*s'\n", (int)(end-c), c); 1399 for (i = 0; i < max_count; i++) { 1400 const char *ipend; 1401 struct sockaddr_storage *ss = &addr[i].in_addr; 1402 int port; 1403 char delim = ','; 1404 1405 if (*p == '[') { 1406 delim = ']'; 1407 p++; 1408 } 1409 1410 ret = ceph_parse_server_name(p, end - p, ss, delim, &ipend); 1411 if (ret) 1412 goto bad; 1413 ret = -EINVAL; 1414 1415 p = ipend; 1416 1417 if (delim == ']') { 1418 if (*p != ']') { 1419 dout("missing matching ']'\n"); 1420 goto bad; 1421 } 1422 p++; 1423 } 1424 1425 /* port? */ 1426 if (p < end && *p == ':') { 1427 port = 0; 1428 p++; 1429 while (p < end && *p >= '0' && *p <= '9') { 1430 port = (port * 10) + (*p - '0'); 1431 p++; 1432 } 1433 if (port > 65535 || port == 0) 1434 goto bad; 1435 } else { 1436 port = CEPH_MON_PORT; 1437 } 1438 1439 addr_set_port(ss, port); 1440 1441 dout("parse_ips got %s\n", ceph_pr_addr(ss)); 1442 1443 if (p == end) 1444 break; 1445 if (*p != ',') 1446 goto bad; 1447 p++; 1448 } 1449 1450 if (p != end) 1451 goto bad; 1452 1453 if (count) 1454 *count = i + 1; 1455 return 0; 1456 1457 bad: 1458 pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c); 1459 return ret; 1460 } 1461 EXPORT_SYMBOL(ceph_parse_ips); 1462 1463 static int process_banner(struct ceph_connection *con) 1464 { 1465 dout("process_banner on %p\n", con); 1466 1467 if (verify_hello(con) < 0) 1468 return -1; 1469 1470 ceph_decode_addr(&con->actual_peer_addr); 1471 ceph_decode_addr(&con->peer_addr_for_me); 1472 1473 /* 1474 * Make sure the other end is who we wanted. note that the other 1475 * end may not yet know their ip address, so if it's 0.0.0.0, give 1476 * them the benefit of the doubt. 1477 */ 1478 if (memcmp(&con->peer_addr, &con->actual_peer_addr, 1479 sizeof(con->peer_addr)) != 0 && 1480 !(addr_is_blank(&con->actual_peer_addr.in_addr) && 1481 con->actual_peer_addr.nonce == con->peer_addr.nonce)) { 1482 pr_warning("wrong peer, want %s/%d, got %s/%d\n", 1483 ceph_pr_addr(&con->peer_addr.in_addr), 1484 (int)le32_to_cpu(con->peer_addr.nonce), 1485 ceph_pr_addr(&con->actual_peer_addr.in_addr), 1486 (int)le32_to_cpu(con->actual_peer_addr.nonce)); 1487 con->error_msg = "wrong peer at address"; 1488 return -1; 1489 } 1490 1491 /* 1492 * did we learn our address? 
	 */
	if (addr_is_blank(&con->msgr->inst.addr.in_addr)) {
		int port = addr_port(&con->msgr->inst.addr.in_addr);

		memcpy(&con->msgr->inst.addr.in_addr,
		       &con->peer_addr_for_me.in_addr,
		       sizeof(con->peer_addr_for_me.in_addr));
		addr_set_port(&con->msgr->inst.addr.in_addr, port);
		encode_my_addr(con->msgr);
		dout("process_banner learned my addr is %s\n",
		     ceph_pr_addr(&con->msgr->inst.addr.in_addr));
	}

	return 0;
}

static void fail_protocol(struct ceph_connection *con)
{
	reset_connection(con);
	BUG_ON(con->state != CON_STATE_NEGOTIATING);
	con->state = CON_STATE_CLOSED;
}

static int process_connect(struct ceph_connection *con)
{
	u64 sup_feat = con->msgr->supported_features;
	u64 req_feat = con->msgr->required_features;
	u64 server_feat = le64_to_cpu(con->in_reply.features);
	int ret;

	dout("process_connect on %p tag %d\n", con, (int)con->in_tag);

	switch (con->in_reply.tag) {
	case CEPH_MSGR_TAG_FEATURES:
		pr_err("%s%lld %s feature set mismatch,"
		       " my %llx < server's %llx, missing %llx\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr.in_addr),
		       sup_feat, server_feat, server_feat & ~sup_feat);
		con->error_msg = "missing required protocol features";
		fail_protocol(con);
		return -1;

	case CEPH_MSGR_TAG_BADPROTOVER:
		pr_err("%s%lld %s protocol version mismatch,"
		       " my %d != server's %d\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr.in_addr),
		       le32_to_cpu(con->out_connect.protocol_version),
		       le32_to_cpu(con->in_reply.protocol_version));
		con->error_msg = "protocol version mismatch";
		fail_protocol(con);
		return -1;

	case CEPH_MSGR_TAG_BADAUTHORIZER:
		con->auth_retry++;
		dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
		     con->auth_retry);
		if (con->auth_retry == 2) {
			con->error_msg = "connect authorization failure";
			return -1;
		}
		con->auth_retry = 1;
		con_out_kvec_reset(con);
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_RESETSESSION:
		/*
		 * If we connected with a large connect_seq but the peer
		 * has no record of a session with us (no connection, or
		 * connect_seq == 0), they will send RESETSESSION to indicate
		 * that they must have reset their session, and may have
		 * dropped messages.
		 */
		dout("process_connect got RESET peer seq %u\n",
		     le32_to_cpu(con->in_reply.connect_seq));
		pr_err("%s%lld %s connection reset\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr.in_addr));
		reset_connection(con);
		con_out_kvec_reset(con);
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);

		/* Tell ceph about it. */
		mutex_unlock(&con->mutex);
		pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name));
		if (con->ops->peer_reset)
			con->ops->peer_reset(con);
		mutex_lock(&con->mutex);
		if (con->state != CON_STATE_NEGOTIATING)
			return -EAGAIN;
		break;

	case CEPH_MSGR_TAG_RETRY_SESSION:
		/*
		 * If we sent a smaller connect_seq than the peer has, try
		 * again with a larger value.
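		 *
		 * E.g. if we sent connect_seq 2 but the peer's session
		 * is at 5, it replies RETRY_SESSION carrying 5; we adopt
		 * 5 and reconnect.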
1597 */ 1598 dout("process_connect got RETRY_SESSION my seq %u, peer %u\n", 1599 le32_to_cpu(con->out_connect.connect_seq), 1600 le32_to_cpu(con->in_reply.connect_seq)); 1601 con->connect_seq = le32_to_cpu(con->in_reply.connect_seq); 1602 con_out_kvec_reset(con); 1603 ret = prepare_write_connect(con); 1604 if (ret < 0) 1605 return ret; 1606 prepare_read_connect(con); 1607 break; 1608 1609 case CEPH_MSGR_TAG_RETRY_GLOBAL: 1610 /* 1611 * If we sent a smaller global_seq than the peer has, try 1612 * again with a larger value. 1613 */ 1614 dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n", 1615 con->peer_global_seq, 1616 le32_to_cpu(con->in_reply.global_seq)); 1617 get_global_seq(con->msgr, 1618 le32_to_cpu(con->in_reply.global_seq)); 1619 con_out_kvec_reset(con); 1620 ret = prepare_write_connect(con); 1621 if (ret < 0) 1622 return ret; 1623 prepare_read_connect(con); 1624 break; 1625 1626 case CEPH_MSGR_TAG_READY: 1627 if (req_feat & ~server_feat) { 1628 pr_err("%s%lld %s protocol feature mismatch," 1629 " my required %llx > server's %llx, need %llx\n", 1630 ENTITY_NAME(con->peer_name), 1631 ceph_pr_addr(&con->peer_addr.in_addr), 1632 req_feat, server_feat, req_feat & ~server_feat); 1633 con->error_msg = "missing required protocol features"; 1634 fail_protocol(con); 1635 return -1; 1636 } 1637 1638 BUG_ON(con->state != CON_STATE_NEGOTIATING); 1639 con->state = CON_STATE_OPEN; 1640 1641 con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq); 1642 con->connect_seq++; 1643 con->peer_features = server_feat; 1644 dout("process_connect got READY gseq %d cseq %d (%d)\n", 1645 con->peer_global_seq, 1646 le32_to_cpu(con->in_reply.connect_seq), 1647 con->connect_seq); 1648 WARN_ON(con->connect_seq != 1649 le32_to_cpu(con->in_reply.connect_seq)); 1650 1651 if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY) 1652 set_bit(CON_FLAG_LOSSYTX, &con->flags); 1653 1654 con->delay = 0; /* reset backoff memory */ 1655 1656 prepare_read_tag(con); 1657 break; 1658 1659 case CEPH_MSGR_TAG_WAIT: 1660 /* 1661 * If there is a connection race (we are opening 1662 * connections to each other), one of us may just have 1663 * to WAIT. This shouldn't happen if we are the 1664 * client. 1665 */ 1666 pr_err("process_connect got WAIT as client\n"); 1667 con->error_msg = "protocol error, got WAIT as client"; 1668 return -1; 1669 1670 default: 1671 pr_err("connect protocol error, will retry\n"); 1672 con->error_msg = "protocol error, garbage tag during connect"; 1673 return -1; 1674 } 1675 return 0; 1676 } 1677 1678 1679 /* 1680 * read (part of) an ack 1681 */ 1682 static int read_partial_ack(struct ceph_connection *con) 1683 { 1684 int size = sizeof (con->in_temp_ack); 1685 int end = size; 1686 1687 return read_partial(con, end, size, &con->in_temp_ack); 1688 } 1689 1690 1691 /* 1692 * We can finally discard anything that's been acked. 
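 *
 * An ack carries a single seq N: every message on out_sent with
 * hdr.seq <= N is dropped, since the peer has committed to having
 * received it.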
1693 */ 1694 static void process_ack(struct ceph_connection *con) 1695 { 1696 struct ceph_msg *m; 1697 u64 ack = le64_to_cpu(con->in_temp_ack); 1698 u64 seq; 1699 1700 while (!list_empty(&con->out_sent)) { 1701 m = list_first_entry(&con->out_sent, struct ceph_msg, 1702 list_head); 1703 seq = le64_to_cpu(m->hdr.seq); 1704 if (seq > ack) 1705 break; 1706 dout("got ack for seq %llu type %d at %p\n", seq, 1707 le16_to_cpu(m->hdr.type), m); 1708 m->ack_stamp = jiffies; 1709 ceph_msg_remove(m); 1710 } 1711 prepare_read_tag(con); 1712 } 1713 1714 1715 1716 1717 static int read_partial_message_section(struct ceph_connection *con, 1718 struct kvec *section, 1719 unsigned int sec_len, u32 *crc) 1720 { 1721 int ret, left; 1722 1723 BUG_ON(!section); 1724 1725 while (section->iov_len < sec_len) { 1726 BUG_ON(section->iov_base == NULL); 1727 left = sec_len - section->iov_len; 1728 ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base + 1729 section->iov_len, left); 1730 if (ret <= 0) 1731 return ret; 1732 section->iov_len += ret; 1733 } 1734 if (section->iov_len == sec_len) 1735 *crc = crc32c(0, section->iov_base, section->iov_len); 1736 1737 return 1; 1738 } 1739 1740 static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip); 1741 1742 static int read_partial_message_pages(struct ceph_connection *con, 1743 struct page **pages, 1744 unsigned int data_len, bool do_datacrc) 1745 { 1746 void *p; 1747 int ret; 1748 int left; 1749 1750 left = min((int)(data_len - con->in_msg_pos.data_pos), 1751 (int)(PAGE_SIZE - con->in_msg_pos.page_pos)); 1752 /* (page) data */ 1753 BUG_ON(pages == NULL); 1754 p = kmap(pages[con->in_msg_pos.page]); 1755 ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos, 1756 left); 1757 if (ret > 0 && do_datacrc) 1758 con->in_data_crc = 1759 crc32c(con->in_data_crc, 1760 p + con->in_msg_pos.page_pos, ret); 1761 kunmap(pages[con->in_msg_pos.page]); 1762 if (ret <= 0) 1763 return ret; 1764 con->in_msg_pos.data_pos += ret; 1765 con->in_msg_pos.page_pos += ret; 1766 if (con->in_msg_pos.page_pos == PAGE_SIZE) { 1767 con->in_msg_pos.page_pos = 0; 1768 con->in_msg_pos.page++; 1769 } 1770 1771 return ret; 1772 } 1773 1774 #ifdef CONFIG_BLOCK 1775 static int read_partial_message_bio(struct ceph_connection *con, 1776 struct bio **bio_iter, int *bio_seg, 1777 unsigned int data_len, bool do_datacrc) 1778 { 1779 struct bio_vec *bv = bio_iovec_idx(*bio_iter, *bio_seg); 1780 void *p; 1781 int ret, left; 1782 1783 left = min((int)(data_len - con->in_msg_pos.data_pos), 1784 (int)(bv->bv_len - con->in_msg_pos.page_pos)); 1785 1786 p = kmap(bv->bv_page) + bv->bv_offset; 1787 1788 ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos, 1789 left); 1790 if (ret > 0 && do_datacrc) 1791 con->in_data_crc = 1792 crc32c(con->in_data_crc, 1793 p + con->in_msg_pos.page_pos, ret); 1794 kunmap(bv->bv_page); 1795 if (ret <= 0) 1796 return ret; 1797 con->in_msg_pos.data_pos += ret; 1798 con->in_msg_pos.page_pos += ret; 1799 if (con->in_msg_pos.page_pos == bv->bv_len) { 1800 con->in_msg_pos.page_pos = 0; 1801 iter_bio_next(bio_iter, bio_seg); 1802 } 1803 1804 return ret; 1805 } 1806 #endif 1807 1808 /* 1809 * read (part of) a message. 
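 *
 * On the wire a message looks like (sketch):
 *
 *	[TAG_MSG][ceph_msg_header][front][middle][data pages][ceph_msg_footer]
 *
 * and this function is re-entered until all of it has arrived.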
1810 */ 1811 static int read_partial_message(struct ceph_connection *con) 1812 { 1813 struct ceph_msg *m = con->in_msg; 1814 int size; 1815 int end; 1816 int ret; 1817 unsigned int front_len, middle_len, data_len; 1818 bool do_datacrc = !con->msgr->nocrc; 1819 u64 seq; 1820 u32 crc; 1821 1822 dout("read_partial_message con %p msg %p\n", con, m); 1823 1824 /* header */ 1825 size = sizeof (con->in_hdr); 1826 end = size; 1827 ret = read_partial(con, end, size, &con->in_hdr); 1828 if (ret <= 0) 1829 return ret; 1830 1831 crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc)); 1832 if (cpu_to_le32(crc) != con->in_hdr.crc) { 1833 pr_err("read_partial_message bad hdr " 1834 " crc %u != expected %u\n", 1835 crc, con->in_hdr.crc); 1836 return -EBADMSG; 1837 } 1838 1839 front_len = le32_to_cpu(con->in_hdr.front_len); 1840 if (front_len > CEPH_MSG_MAX_FRONT_LEN) 1841 return -EIO; 1842 middle_len = le32_to_cpu(con->in_hdr.middle_len); 1843 if (middle_len > CEPH_MSG_MAX_DATA_LEN) 1844 return -EIO; 1845 data_len = le32_to_cpu(con->in_hdr.data_len); 1846 if (data_len > CEPH_MSG_MAX_DATA_LEN) 1847 return -EIO; 1848 1849 /* verify seq# */ 1850 seq = le64_to_cpu(con->in_hdr.seq); 1851 if ((s64)seq - (s64)con->in_seq < 1) { 1852 pr_info("skipping %s%lld %s seq %lld expected %lld\n", 1853 ENTITY_NAME(con->peer_name), 1854 ceph_pr_addr(&con->peer_addr.in_addr), 1855 seq, con->in_seq + 1); 1856 con->in_base_pos = -front_len - middle_len - data_len - 1857 sizeof(m->footer); 1858 con->in_tag = CEPH_MSGR_TAG_READY; 1859 return 0; 1860 } else if ((s64)seq - (s64)con->in_seq > 1) { 1861 pr_err("read_partial_message bad seq %lld expected %lld\n", 1862 seq, con->in_seq + 1); 1863 con->error_msg = "bad message sequence # for incoming message"; 1864 return -EBADMSG; 1865 } 1866 1867 /* allocate message? 
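	 * ceph_con_in_msg_alloc() may instead tell us to skip, in which
	 * case in_base_pos goes negative to swallow the message body.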
*/ 1868 if (!con->in_msg) { 1869 int skip = 0; 1870 1871 dout("got hdr type %d front %d data %d\n", con->in_hdr.type, 1872 con->in_hdr.front_len, con->in_hdr.data_len); 1873 ret = ceph_con_in_msg_alloc(con, &skip); 1874 if (ret < 0) 1875 return ret; 1876 if (skip) { 1877 /* skip this message */ 1878 dout("alloc_msg said skip message\n"); 1879 BUG_ON(con->in_msg); 1880 con->in_base_pos = -front_len - middle_len - data_len - 1881 sizeof(m->footer); 1882 con->in_tag = CEPH_MSGR_TAG_READY; 1883 con->in_seq++; 1884 return 0; 1885 } 1886 1887 BUG_ON(!con->in_msg); 1888 BUG_ON(con->in_msg->con != con); 1889 m = con->in_msg; 1890 m->front.iov_len = 0; /* haven't read it yet */ 1891 if (m->middle) 1892 m->middle->vec.iov_len = 0; 1893 1894 con->in_msg_pos.page = 0; 1895 if (m->pages) 1896 con->in_msg_pos.page_pos = m->page_alignment; 1897 else 1898 con->in_msg_pos.page_pos = 0; 1899 con->in_msg_pos.data_pos = 0; 1900 1901 #ifdef CONFIG_BLOCK 1902 if (m->bio) 1903 init_bio_iter(m->bio, &m->bio_iter, &m->bio_seg); 1904 #endif 1905 } 1906 1907 /* front */ 1908 ret = read_partial_message_section(con, &m->front, front_len, 1909 &con->in_front_crc); 1910 if (ret <= 0) 1911 return ret; 1912 1913 /* middle */ 1914 if (m->middle) { 1915 ret = read_partial_message_section(con, &m->middle->vec, 1916 middle_len, 1917 &con->in_middle_crc); 1918 if (ret <= 0) 1919 return ret; 1920 } 1921 1922 /* (page) data */ 1923 while (con->in_msg_pos.data_pos < data_len) { 1924 if (m->pages) { 1925 ret = read_partial_message_pages(con, m->pages, 1926 data_len, do_datacrc); 1927 if (ret <= 0) 1928 return ret; 1929 #ifdef CONFIG_BLOCK 1930 } else if (m->bio) { 1931 BUG_ON(!m->bio_iter); 1932 ret = read_partial_message_bio(con, 1933 &m->bio_iter, &m->bio_seg, 1934 data_len, do_datacrc); 1935 if (ret <= 0) 1936 return ret; 1937 #endif 1938 } else { 1939 BUG_ON(1); 1940 } 1941 } 1942 1943 /* footer */ 1944 size = sizeof (m->footer); 1945 end += size; 1946 ret = read_partial(con, end, size, &m->footer); 1947 if (ret <= 0) 1948 return ret; 1949 1950 dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n", 1951 m, front_len, m->footer.front_crc, middle_len, 1952 m->footer.middle_crc, data_len, m->footer.data_crc); 1953 1954 /* crc ok? */ 1955 if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) { 1956 pr_err("read_partial_message %p front crc %u != exp. %u\n", 1957 m, con->in_front_crc, m->footer.front_crc); 1958 return -EBADMSG; 1959 } 1960 if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) { 1961 pr_err("read_partial_message %p middle crc %u != exp %u\n", 1962 m, con->in_middle_crc, m->footer.middle_crc); 1963 return -EBADMSG; 1964 } 1965 if (do_datacrc && 1966 (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 && 1967 con->in_data_crc != le32_to_cpu(m->footer.data_crc)) { 1968 pr_err("read_partial_message %p data crc %u != exp. %u\n", m, 1969 con->in_data_crc, le32_to_cpu(m->footer.data_crc)); 1970 return -EBADMSG; 1971 } 1972 1973 return 1; /* done! */ 1974 } 1975 1976 /* 1977 * Process message. This happens in the worker thread. The callback should 1978 * be careful not to do anything that waits on other incoming messages or it 1979 * may deadlock. 
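 *
 * (The connection mutex is dropped around the dispatch callback for
 * the same reason.)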
1980 */ 1981 static void process_message(struct ceph_connection *con) 1982 { 1983 struct ceph_msg *msg; 1984 1985 BUG_ON(con->in_msg->con != con); 1986 con->in_msg->con = NULL; 1987 msg = con->in_msg; 1988 con->in_msg = NULL; 1989 con->ops->put(con); 1990 1991 /* if first message, set peer_name */ 1992 if (con->peer_name.type == 0) 1993 con->peer_name = msg->hdr.src; 1994 1995 con->in_seq++; 1996 mutex_unlock(&con->mutex); 1997 1998 dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n", 1999 msg, le64_to_cpu(msg->hdr.seq), 2000 ENTITY_NAME(msg->hdr.src), 2001 le16_to_cpu(msg->hdr.type), 2002 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)), 2003 le32_to_cpu(msg->hdr.front_len), 2004 le32_to_cpu(msg->hdr.data_len), 2005 con->in_front_crc, con->in_middle_crc, con->in_data_crc); 2006 con->ops->dispatch(con, msg); 2007 2008 mutex_lock(&con->mutex); 2009 } 2010 2011 2012 /* 2013 * Write something to the socket. Called in a worker thread when the 2014 * socket appears to be writeable and we have something ready to send. 2015 */ 2016 static int try_write(struct ceph_connection *con) 2017 { 2018 int ret = 1; 2019 2020 dout("try_write start %p state %lu\n", con, con->state); 2021 2022 more: 2023 dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes); 2024 2025 /* open the socket first? */ 2026 if (con->state == CON_STATE_PREOPEN) { 2027 BUG_ON(con->sock); 2028 con->state = CON_STATE_CONNECTING; 2029 2030 con_out_kvec_reset(con); 2031 prepare_write_banner(con); 2032 prepare_read_banner(con); 2033 2034 BUG_ON(con->in_msg); 2035 con->in_tag = CEPH_MSGR_TAG_READY; 2036 dout("try_write initiating connect on %p new state %lu\n", 2037 con, con->state); 2038 ret = ceph_tcp_connect(con); 2039 if (ret < 0) { 2040 con->error_msg = "connect error"; 2041 goto out; 2042 } 2043 } 2044 2045 more_kvec: 2046 /* kvec data queued? */ 2047 if (con->out_skip) { 2048 ret = write_partial_skip(con); 2049 if (ret <= 0) 2050 goto out; 2051 } 2052 if (con->out_kvec_left) { 2053 ret = write_partial_kvec(con); 2054 if (ret <= 0) 2055 goto out; 2056 } 2057 2058 /* msg pages? */ 2059 if (con->out_msg) { 2060 if (con->out_msg_done) { 2061 ceph_msg_put(con->out_msg); 2062 con->out_msg = NULL; /* we're done with this one */ 2063 goto do_next; 2064 } 2065 2066 ret = write_partial_msg_pages(con); 2067 if (ret == 1) 2068 goto more_kvec; /* we need to send the footer, too! */ 2069 if (ret == 0) 2070 goto out; 2071 if (ret < 0) { 2072 dout("try_write write_partial_msg_pages err %d\n", 2073 ret); 2074 goto out; 2075 } 2076 } 2077 2078 do_next: 2079 if (con->state == CON_STATE_OPEN) { 2080 /* is anything else pending? */ 2081 if (!list_empty(&con->out_queue)) { 2082 prepare_write_message(con); 2083 goto more; 2084 } 2085 if (con->in_seq > con->in_seq_acked) { 2086 prepare_write_ack(con); 2087 goto more; 2088 } 2089 if (test_and_clear_bit(CON_FLAG_KEEPALIVE_PENDING, 2090 &con->flags)) { 2091 prepare_write_keepalive(con); 2092 goto more; 2093 } 2094 } 2095 2096 /* Nothing to do! */ 2097 clear_bit(CON_FLAG_WRITE_PENDING, &con->flags); 2098 dout("try_write nothing else to write.\n"); 2099 ret = 0; 2100 out: 2101 dout("try_write done on %p ret %d\n", con, ret); 2102 return ret; 2103 } 2104 2105 2106 2107 /* 2108 * Read what we can from the socket. 
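 *
 * Typically returns 0 once the socket is drained (ceph_tcp_recvmsg()
 * maps -EAGAIN to 0), and a negative value on faults.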
/*
 * Read what we can from the socket.
 */
static int try_read(struct ceph_connection *con)
{
	int ret = -1;

more:
	dout("try_read start on %p state %lu\n", con, con->state);
	if (con->state != CON_STATE_CONNECTING &&
	    con->state != CON_STATE_NEGOTIATING &&
	    con->state != CON_STATE_OPEN)
		return 0;

	BUG_ON(!con->sock);

	dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
	     con->in_base_pos);

	if (con->state == CON_STATE_CONNECTING) {
		dout("try_read connecting\n");
		ret = read_partial_banner(con);
		if (ret <= 0)
			goto out;
		ret = process_banner(con);
		if (ret < 0)
			goto out;

		BUG_ON(con->state != CON_STATE_CONNECTING);
		con->state = CON_STATE_NEGOTIATING;

		/*
		 * Received banner is good, exchange connection info.
		 * Do not reset out_kvec, as sending our banner raced
		 * with receiving peer banner after connect completed.
		 */
		ret = prepare_write_connect(con);
		if (ret < 0)
			goto out;
		prepare_read_connect(con);

		/* Send connection info before awaiting response */
		goto out;
	}

	if (con->state == CON_STATE_NEGOTIATING) {
		dout("try_read negotiating\n");
		ret = read_partial_connect(con);
		if (ret <= 0)
			goto out;
		ret = process_connect(con);
		if (ret < 0)
			goto out;
		goto more;
	}

	BUG_ON(con->state != CON_STATE_OPEN);

	if (con->in_base_pos < 0) {
		/*
		 * skipping + discarding content.
		 *
		 * FIXME: there must be a better way to do this!
		 */
		static char buf[SKIP_BUF_SIZE];
		int skip = min((int)sizeof(buf), -con->in_base_pos);

		dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
		ret = ceph_tcp_recvmsg(con->sock, buf, skip);
		if (ret <= 0)
			goto out;
		con->in_base_pos += ret;
		if (con->in_base_pos)
			goto more;
	}
	if (con->in_tag == CEPH_MSGR_TAG_READY) {
		/*
		 * what's next?
		 */
		ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
		if (ret <= 0)
			goto out;
		dout("try_read got tag %d\n", (int)con->in_tag);
		switch (con->in_tag) {
		case CEPH_MSGR_TAG_MSG:
			prepare_read_message(con);
			break;
		case CEPH_MSGR_TAG_ACK:
			prepare_read_ack(con);
			break;
		case CEPH_MSGR_TAG_CLOSE:
			con_close_socket(con);
			con->state = CON_STATE_CLOSED;
			goto out;
		default:
			goto bad_tag;
		}
	}
	if (con->in_tag == CEPH_MSGR_TAG_MSG) {
		ret = read_partial_message(con);
		if (ret <= 0) {
			switch (ret) {
			case -EBADMSG:
				con->error_msg = "bad crc";
				ret = -EIO;
				break;
			case -EIO:
				con->error_msg = "io error";
				break;
			}
			goto out;
		}
		if (con->in_tag == CEPH_MSGR_TAG_READY)
			goto more;
		process_message(con);
		if (con->state == CON_STATE_OPEN)
			prepare_read_tag(con);
		goto more;
	}
	if (con->in_tag == CEPH_MSGR_TAG_ACK) {
		ret = read_partial_ack(con);
		if (ret <= 0)
			goto out;
		process_ack(con);
		goto more;
	}

out:
	dout("try_read done on %p ret %d\n", con, ret);
	return ret;

bad_tag:
	pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag);
	con->error_msg = "protocol error, garbage tag";
	ret = -1;
	goto out;
}
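/*
 * Note on the skip path in try_read(): a negative in_base_pos means
 * "this many raw bytes remain to be discarded" (see the skip branch of
 * read_partial_message() above and ceph_msg_revoke_incoming() below).
 * The static SKIP_BUF_SIZE buffer simply soaks up input until the
 * count climbs back to zero and normal tag processing resumes.
 */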
/*
 * Atomically queue work on a connection.  Bump @con reference to
 * avoid races with connection teardown.
 */
static void queue_con(struct ceph_connection *con)
{
	if (!con->ops->get(con)) {
		dout("queue_con %p ref count 0\n", con);
		return;
	}

	if (!queue_delayed_work(ceph_msgr_wq, &con->work, 0)) {
		dout("queue_con %p - already queued\n", con);
		con->ops->put(con);
	} else {
		dout("queue_con %p\n", con);
	}
}

/*
 * Do some work on a connection.  Drop a connection ref when we're done.
 */
static void con_work(struct work_struct *work)
{
	struct ceph_connection *con = container_of(work, struct ceph_connection,
						   work.work);
	int ret;

	mutex_lock(&con->mutex);
restart:
	if (test_and_clear_bit(CON_FLAG_SOCK_CLOSED, &con->flags)) {
		switch (con->state) {
		case CON_STATE_CONNECTING:
			con->error_msg = "connection failed";
			break;
		case CON_STATE_NEGOTIATING:
			con->error_msg = "negotiation failed";
			break;
		case CON_STATE_OPEN:
			con->error_msg = "socket closed";
			break;
		default:
			dout("unrecognized con state %d\n", (int)con->state);
			con->error_msg = "unrecognized con state";
			BUG();
		}
		goto fault;
	}

	if (test_and_clear_bit(CON_FLAG_BACKOFF, &con->flags)) {
		dout("con_work %p backing off\n", con);
		if (queue_delayed_work(ceph_msgr_wq, &con->work,
				       round_jiffies_relative(con->delay))) {
			dout("con_work %p backoff %lu\n", con, con->delay);
			mutex_unlock(&con->mutex);
			return;
		} else {
			con->ops->put(con);
			dout("con_work %p FAILED to back off %lu\n", con,
			     con->delay);
		}
	}

	if (con->state == CON_STATE_STANDBY) {
		dout("con_work %p STANDBY\n", con);
		goto done;
	}
	if (con->state == CON_STATE_CLOSED) {
		dout("con_work %p CLOSED\n", con);
		BUG_ON(con->sock);
		goto done;
	}
	if (con->state == CON_STATE_PREOPEN) {
		dout("con_work OPENING\n");
		BUG_ON(con->sock);
	}

	ret = try_read(con);
	if (ret == -EAGAIN)
		goto restart;
	if (ret < 0) {
		con->error_msg = "socket error on read";
		goto fault;
	}

	ret = try_write(con);
	if (ret == -EAGAIN)
		goto restart;
	if (ret < 0) {
		con->error_msg = "socket error on write";
		goto fault;
	}

done:
	mutex_unlock(&con->mutex);
done_unlocked:
	con->ops->put(con);
	return;

fault:
	ceph_fault(con);     /* error/fault path */
	goto done_unlocked;
}
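/*
 * Reference discipline: every successful queue_con() takes a ref via
 * con->ops->get(), and con_work() drops it on every exit path
 * (including the fault path, which unlocks inside ceph_fault() and
 * then falls through to done_unlocked).  A successful backoff requeue
 * keeps the ref it already holds for the rescheduled work.
 */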
/*
 * Generic error/fault handler.  A retry mechanism is used with
 * exponential backoff.
 */
static void ceph_fault(struct ceph_connection *con)
	__releases(con->mutex)
{
	pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
	       ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
	dout("fault %p state %lu to peer %s\n",
	     con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));

	BUG_ON(con->state != CON_STATE_CONNECTING &&
	       con->state != CON_STATE_NEGOTIATING &&
	       con->state != CON_STATE_OPEN);

	con_close_socket(con);

	if (test_bit(CON_FLAG_LOSSYTX, &con->flags)) {
		dout("fault on LOSSYTX channel, marking CLOSED\n");
		con->state = CON_STATE_CLOSED;
		goto out_unlock;
	}

	if (con->in_msg) {
		BUG_ON(con->in_msg->con != con);
		con->in_msg->con = NULL;
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
		con->ops->put(con);
	}

	/* Requeue anything that hasn't been acked */
	list_splice_init(&con->out_sent, &con->out_queue);

	/* If there are no messages queued or keepalive pending, place
	 * the connection in a STANDBY state */
	if (list_empty(&con->out_queue) &&
	    !test_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags)) {
		dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
		clear_bit(CON_FLAG_WRITE_PENDING, &con->flags);
		con->state = CON_STATE_STANDBY;
	} else {
		/* retry after a delay. */
		con->state = CON_STATE_PREOPEN;
		if (con->delay == 0)
			con->delay = BASE_DELAY_INTERVAL;
		else if (con->delay < MAX_DELAY_INTERVAL)
			con->delay *= 2;
		con->ops->get(con);
		if (queue_delayed_work(ceph_msgr_wq, &con->work,
				       round_jiffies_relative(con->delay))) {
			dout("fault queued %p delay %lu\n", con, con->delay);
		} else {
			con->ops->put(con);
			dout("fault failed to queue %p delay %lu, backoff\n",
			     con, con->delay);
			/*
			 * In many cases we see a socket state change
			 * while con_work is running and end up
			 * queuing (non-delayed) work, such that we
			 * can't backoff with a delay.  Set a flag so
			 * that when con_work restarts we schedule the
			 * delay then.
			 */
			set_bit(CON_FLAG_BACKOFF, &con->flags);
		}
	}

out_unlock:
	mutex_unlock(&con->mutex);
	/*
	 * in case we faulted due to authentication, invalidate our
	 * current tickets so that we can get new ones.
	 */
	if (con->auth_retry && con->ops->invalidate_authorizer) {
		dout("calling invalidate_authorizer()\n");
		con->ops->invalidate_authorizer(con);
	}

	if (con->ops->fault)
		con->ops->fault(con);
}
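/*
 * The retry delay used above doubles on each successive fault until it
 * reaches MAX_DELAY_INTERVAL.  A sketch of the progression (the helper
 * name is illustrative and unused; the messenger updates con->delay in
 * place inside ceph_fault()):
 */
static inline unsigned long ceph_next_fault_delay(unsigned long delay)
{
	if (delay == 0)
		return BASE_DELAY_INTERVAL;	/* first fault */
	if (delay < MAX_DELAY_INTERVAL)
		return delay * 2;		/* exponential backoff */
	return delay;				/* capped */
}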
/*
 * initialize a new messenger instance
 */
void ceph_messenger_init(struct ceph_messenger *msgr,
			 struct ceph_entity_addr *myaddr,
			 u32 supported_features,
			 u32 required_features,
			 bool nocrc)
{
	msgr->supported_features = supported_features;
	msgr->required_features = required_features;

	spin_lock_init(&msgr->global_seq_lock);

	if (myaddr)
		msgr->inst.addr = *myaddr;

	/* select a random nonce */
	msgr->inst.addr.type = 0;
	get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce));
	encode_my_addr(msgr);
	msgr->nocrc = nocrc;

	atomic_set(&msgr->stopping, 0);

	dout("%s %p\n", __func__, msgr);
}
EXPORT_SYMBOL(ceph_messenger_init);

static void clear_standby(struct ceph_connection *con)
{
	/* come back from STANDBY? */
	if (con->state == CON_STATE_STANDBY) {
		dout("clear_standby %p and ++connect_seq\n", con);
		con->state = CON_STATE_PREOPEN;
		con->connect_seq++;
		WARN_ON(test_bit(CON_FLAG_WRITE_PENDING, &con->flags));
		WARN_ON(test_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags));
	}
}

/*
 * Queue up an outgoing message on the given connection.
 */
void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
{
	/* set src+dst */
	msg->hdr.src = con->msgr->inst.name;
	BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));
	msg->needs_out_seq = true;

	mutex_lock(&con->mutex);

	if (con->state == CON_STATE_CLOSED) {
		dout("con_send %p closed, dropping %p\n", con, msg);
		ceph_msg_put(msg);
		mutex_unlock(&con->mutex);
		return;
	}

	BUG_ON(msg->con != NULL);
	msg->con = con->ops->get(con);
	BUG_ON(msg->con == NULL);

	BUG_ON(!list_empty(&msg->list_head));
	list_add_tail(&msg->list_head, &con->out_queue);
	dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
	     ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
	     ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
	     le32_to_cpu(msg->hdr.front_len),
	     le32_to_cpu(msg->hdr.middle_len),
	     le32_to_cpu(msg->hdr.data_len));

	clear_standby(con);
	mutex_unlock(&con->mutex);

	/* if there wasn't anything waiting to send before, queue
	 * new work */
	if (test_and_set_bit(CON_FLAG_WRITE_PENDING, &con->flags) == 0)
		queue_con(con);
}
EXPORT_SYMBOL(ceph_con_send);
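/*
 * Typical caller pattern for ceph_con_send() (a sketch; the message
 * reference is handed to the messenger, which drops it once the
 * message has been sent or the connection is closed):
 *
 *	msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
 *	if (msg)
 *		ceph_con_send(con, msg);
 */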
/*
 * Revoke a message that was previously queued for send
 */
void ceph_msg_revoke(struct ceph_msg *msg)
{
	struct ceph_connection *con = msg->con;

	if (!con)
		return;		/* Message not in our possession */

	mutex_lock(&con->mutex);
	if (!list_empty(&msg->list_head)) {
		dout("%s %p msg %p - was on queue\n", __func__, con, msg);
		list_del_init(&msg->list_head);
		BUG_ON(msg->con == NULL);
		msg->con->ops->put(msg->con);
		msg->con = NULL;
		msg->hdr.seq = 0;

		ceph_msg_put(msg);
	}
	if (con->out_msg == msg) {
		dout("%s %p msg %p - was sending\n", __func__, con, msg);
		con->out_msg = NULL;
		if (con->out_kvec_is_msg) {
			con->out_skip = con->out_kvec_bytes;
			con->out_kvec_is_msg = false;
		}
		msg->hdr.seq = 0;

		ceph_msg_put(msg);
	}
	mutex_unlock(&con->mutex);
}

/*
 * Revoke a message that we may be reading data into
 */
void ceph_msg_revoke_incoming(struct ceph_msg *msg)
{
	struct ceph_connection *con;

	BUG_ON(msg == NULL);
	if (!msg->con) {
		dout("%s msg %p null con\n", __func__, msg);

		return;		/* Message not in our possession */
	}

	con = msg->con;
	mutex_lock(&con->mutex);
	if (con->in_msg == msg) {
		unsigned int front_len = le32_to_cpu(con->in_hdr.front_len);
		unsigned int middle_len = le32_to_cpu(con->in_hdr.middle_len);
		unsigned int data_len = le32_to_cpu(con->in_hdr.data_len);

		/* skip rest of message */
		dout("%s %p msg %p revoked\n", __func__, con, msg);
		con->in_base_pos = con->in_base_pos -
				sizeof(struct ceph_msg_header) -
				front_len -
				middle_len -
				data_len -
				sizeof(struct ceph_msg_footer);
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
		con->in_tag = CEPH_MSGR_TAG_READY;
		con->in_seq++;
	} else {
		dout("%s %p in_msg %p msg %p no-op\n",
		     __func__, con, con->in_msg, msg);
	}
	mutex_unlock(&con->mutex);
}

/*
 * Queue a keepalive byte to ensure the tcp connection is alive.
 */
void ceph_con_keepalive(struct ceph_connection *con)
{
	dout("con_keepalive %p\n", con);
	mutex_lock(&con->mutex);
	clear_standby(con);
	mutex_unlock(&con->mutex);
	if (test_and_set_bit(CON_FLAG_KEEPALIVE_PENDING, &con->flags) == 0 &&
	    test_and_set_bit(CON_FLAG_WRITE_PENDING, &con->flags) == 0)
		queue_con(con);
}
EXPORT_SYMBOL(ceph_con_keepalive);
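/*
 * Note on the two revoke paths above: a message still sitting on the
 * out_queue can simply be unlinked, but bytes already staged for the
 * wire cannot be taken back, so ceph_msg_revoke() turns the unsent
 * remainder of the staged kvec into out_skip padding for
 * write_partial_skip() to emit.  Similarly, ceph_msg_revoke_incoming()
 * makes in_base_pos negative so try_read() discards the rest of the
 * message body off the wire before resuming tag processing.
 */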
/*
 * construct a new message with given type, size
 * the new msg has a ref count of 1.
 */
struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
			      bool can_fail)
{
	struct ceph_msg *m;

	m = kmalloc(sizeof(*m), flags);
	if (m == NULL)
		goto out;
	kref_init(&m->kref);

	m->con = NULL;
	INIT_LIST_HEAD(&m->list_head);

	m->hdr.tid = 0;
	m->hdr.type = cpu_to_le16(type);
	m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
	m->hdr.version = 0;
	m->hdr.front_len = cpu_to_le32(front_len);
	m->hdr.middle_len = 0;
	m->hdr.data_len = 0;
	m->hdr.data_off = 0;
	m->hdr.reserved = 0;
	m->footer.front_crc = 0;
	m->footer.middle_crc = 0;
	m->footer.data_crc = 0;
	m->footer.flags = 0;
	m->front_max = front_len;
	m->front_is_vmalloc = false;
	m->more_to_follow = false;
	m->ack_stamp = 0;
	m->pool = NULL;

	/* middle */
	m->middle = NULL;

	/* data */
	m->nr_pages = 0;
	m->page_alignment = 0;
	m->pages = NULL;
	m->pagelist = NULL;
	m->bio = NULL;
	m->bio_iter = NULL;
	m->bio_seg = 0;
	m->trail = NULL;

	/* front */
	if (front_len) {
		if (front_len > PAGE_CACHE_SIZE) {
			m->front.iov_base = __vmalloc(front_len, flags,
						      PAGE_KERNEL);
			m->front_is_vmalloc = true;
		} else {
			m->front.iov_base = kmalloc(front_len, flags);
		}
		if (m->front.iov_base == NULL) {
			dout("ceph_msg_new can't allocate %d bytes\n",
			     front_len);
			goto out2;
		}
	} else {
		m->front.iov_base = NULL;
	}
	m->front.iov_len = front_len;

	dout("ceph_msg_new %p front %d\n", m, front_len);
	return m;

out2:
	ceph_msg_put(m);
out:
	if (!can_fail) {
		pr_err("msg_new can't create type %d front %d\n", type,
		       front_len);
		WARN_ON(1);
	} else {
		dout("msg_new can't create type %d front %d\n", type,
		     front_len);
	}
	return NULL;
}
EXPORT_SYMBOL(ceph_msg_new);

/*
 * Allocate "middle" portion of a message, if it is needed and wasn't
 * allocated by alloc_msg.  This allows us to read a small fixed-size
 * per-type header in the front and then gracefully fail (i.e.,
 * propagate the error to the caller based on info in the front) when
 * the middle is too large.
 */
static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
{
	int type = le16_to_cpu(msg->hdr.type);
	int middle_len = le32_to_cpu(msg->hdr.middle_len);

	dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
	     ceph_msg_type_name(type), middle_len);
	BUG_ON(!middle_len);
	BUG_ON(msg->middle);

	msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
	if (!msg->middle)
		return -ENOMEM;
	return 0;
}
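/*
 * Design note on the front allocation in ceph_msg_new(): fronts larger
 * than a page fall back to __vmalloc(), since a large physically
 * contiguous kmalloc() is much more likely to fail under memory
 * pressure; front_is_vmalloc records which allocator was used so that
 * ceph_msg_kfree() below can pair vfree() with __vmalloc() and
 * kfree() with kmalloc().
 */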
 * On error (ENOMEM, EAGAIN, ...),
 *  - con->in_msg == NULL
 */
static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip)
{
	struct ceph_msg_header *hdr = &con->in_hdr;
	int type = le16_to_cpu(hdr->type);
	int front_len = le32_to_cpu(hdr->front_len);
	int middle_len = le32_to_cpu(hdr->middle_len);
	int ret = 0;

	BUG_ON(con->in_msg != NULL);

	if (con->ops->alloc_msg) {
		struct ceph_msg *msg;

		mutex_unlock(&con->mutex);
		msg = con->ops->alloc_msg(con, hdr, skip);
		mutex_lock(&con->mutex);
		if (con->state != CON_STATE_OPEN) {
			/* alloc_msg may return NULL (with *skip set) */
			if (msg)
				ceph_msg_put(msg);
			return -EAGAIN;
		}
		con->in_msg = msg;
		if (con->in_msg) {
			con->in_msg->con = con->ops->get(con);
			BUG_ON(con->in_msg->con == NULL);
		}
		if (*skip) {
			con->in_msg = NULL;
			return 0;
		}
		if (!con->in_msg) {
			con->error_msg =
				"error allocating memory for incoming message";
			return -ENOMEM;
		}
	}
	if (!con->in_msg) {
		con->in_msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
		if (!con->in_msg) {
			pr_err("unable to allocate msg type %d len %d\n",
			       type, front_len);
			return -ENOMEM;
		}
		con->in_msg->con = con->ops->get(con);
		BUG_ON(con->in_msg->con == NULL);
		con->in_msg->page_alignment = le16_to_cpu(hdr->data_off);
	}
	memcpy(&con->in_msg->hdr, &con->in_hdr, sizeof(con->in_hdr));

	if (middle_len && !con->in_msg->middle) {
		ret = ceph_alloc_middle(con, con->in_msg);
		if (ret < 0) {
			ceph_msg_put(con->in_msg);
			con->in_msg = NULL;
		}
	}

	return ret;
}


/*
 * Free a generically kmalloc'd message.
 */
void ceph_msg_kfree(struct ceph_msg *m)
{
	dout("msg_kfree %p\n", m);
	if (m->front_is_vmalloc)
		vfree(m->front.iov_base);
	else
		kfree(m->front.iov_base);
	kfree(m);
}

/*
 * Drop a msg ref.  Destroy as needed.
 */
void ceph_msg_last_put(struct kref *kref)
{
	struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);

	dout("ceph_msg_put last one on %p\n", m);
	WARN_ON(!list_empty(&m->list_head));

	/* drop middle, data, if any */
	if (m->middle) {
		ceph_buffer_put(m->middle);
		m->middle = NULL;
	}
	m->nr_pages = 0;
	m->pages = NULL;

	if (m->pagelist) {
		ceph_pagelist_release(m->pagelist);
		kfree(m->pagelist);
		m->pagelist = NULL;
	}

	m->trail = NULL;

	if (m->pool)
		ceph_msgpool_put(m->pool, m);
	else
		ceph_msg_kfree(m);
}
EXPORT_SYMBOL(ceph_msg_last_put);

void ceph_msg_dump(struct ceph_msg *msg)
{
	pr_debug("msg_dump %p (front_max %d nr_pages %d)\n", msg,
		 msg->front_max, msg->nr_pages);
	print_hex_dump(KERN_DEBUG, "header: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       &msg->hdr, sizeof(msg->hdr), true);
	print_hex_dump(KERN_DEBUG, " front: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       msg->front.iov_base, msg->front.iov_len, true);
	if (msg->middle)
		print_hex_dump(KERN_DEBUG, "middle: ",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       msg->middle->vec.iov_base,
			       msg->middle->vec.iov_len, true);
	print_hex_dump(KERN_DEBUG, "footer: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       &msg->footer, sizeof(msg->footer), true);
}
EXPORT_SYMBOL(ceph_msg_dump);
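/*
 * For reference, the alloc_msg contract assumed by
 * ceph_con_in_msg_alloc() (a sketch; the real implementations live in
 * the mon/osd clients, not in this file): return a ref-counted message
 * sized from @hdr, or NULL with *skip = 1 to have the messenger
 * discard the incoming message body.  E.g.:
 *
 *	static struct ceph_msg *my_alloc_msg(struct ceph_connection *con,
 *					     struct ceph_msg_header *hdr,
 *					     int *skip)
 *	{
 *		int type = le16_to_cpu(hdr->type);
 *		int front_len = le32_to_cpu(hdr->front_len);
 *
 *		if (type != MY_EXPECTED_TYPE) {	// hypothetical check
 *			*skip = 1;
 *			return NULL;
 *		}
 *		return ceph_msg_new(type, front_len, GFP_NOFS, false);
 *	}
 */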