1 #include "ceph_debug.h" 2 3 #include <linux/wait.h> 4 #include <linux/slab.h> 5 #include <linux/sched.h> 6 #include <linux/smp_lock.h> 7 8 #include "mds_client.h" 9 #include "mon_client.h" 10 #include "super.h" 11 #include "messenger.h" 12 #include "decode.h" 13 #include "auth.h" 14 #include "pagelist.h" 15 16 /* 17 * A cluster of MDS (metadata server) daemons is responsible for 18 * managing the file system namespace (the directory hierarchy and 19 * inodes) and for coordinating shared access to storage. Metadata is 20 * partitioning hierarchically across a number of servers, and that 21 * partition varies over time as the cluster adjusts the distribution 22 * in order to balance load. 23 * 24 * The MDS client is primarily responsible to managing synchronous 25 * metadata requests for operations like open, unlink, and so forth. 26 * If there is a MDS failure, we find out about it when we (possibly 27 * request and) receive a new MDS map, and can resubmit affected 28 * requests. 29 * 30 * For the most part, though, we take advantage of a lossless 31 * communications channel to the MDS, and do not need to worry about 32 * timing out or resubmitting requests. 33 * 34 * We maintain a stateful "session" with each MDS we interact with. 35 * Within each session, we sent periodic heartbeat messages to ensure 36 * any capabilities or leases we have been issues remain valid. If 37 * the session times out and goes stale, our leases and capabilities 38 * are no longer valid. 39 */ 40 41 struct ceph_reconnect_state { 42 struct ceph_pagelist *pagelist; 43 bool flock; 44 }; 45 46 static void __wake_requests(struct ceph_mds_client *mdsc, 47 struct list_head *head); 48 49 static const struct ceph_connection_operations mds_con_ops; 50 51 52 /* 53 * mds reply parsing 54 */ 55 56 /* 57 * parse individual inode info 58 */ 59 static int parse_reply_info_in(void **p, void *end, 60 struct ceph_mds_reply_info_in *info) 61 { 62 int err = -EIO; 63 64 info->in = *p; 65 *p += sizeof(struct ceph_mds_reply_inode) + 66 sizeof(*info->in->fragtree.splits) * 67 le32_to_cpu(info->in->fragtree.nsplits); 68 69 ceph_decode_32_safe(p, end, info->symlink_len, bad); 70 ceph_decode_need(p, end, info->symlink_len, bad); 71 info->symlink = *p; 72 *p += info->symlink_len; 73 74 ceph_decode_32_safe(p, end, info->xattr_len, bad); 75 ceph_decode_need(p, end, info->xattr_len, bad); 76 info->xattr_data = *p; 77 *p += info->xattr_len; 78 return 0; 79 bad: 80 return err; 81 } 82 83 /* 84 * parse a normal reply, which may contain a (dir+)dentry and/or a 85 * target inode. 
86 */ 87 static int parse_reply_info_trace(void **p, void *end, 88 struct ceph_mds_reply_info_parsed *info) 89 { 90 int err; 91 92 if (info->head->is_dentry) { 93 err = parse_reply_info_in(p, end, &info->diri); 94 if (err < 0) 95 goto out_bad; 96 97 if (unlikely(*p + sizeof(*info->dirfrag) > end)) 98 goto bad; 99 info->dirfrag = *p; 100 *p += sizeof(*info->dirfrag) + 101 sizeof(u32)*le32_to_cpu(info->dirfrag->ndist); 102 if (unlikely(*p > end)) 103 goto bad; 104 105 ceph_decode_32_safe(p, end, info->dname_len, bad); 106 ceph_decode_need(p, end, info->dname_len, bad); 107 info->dname = *p; 108 *p += info->dname_len; 109 info->dlease = *p; 110 *p += sizeof(*info->dlease); 111 } 112 113 if (info->head->is_target) { 114 err = parse_reply_info_in(p, end, &info->targeti); 115 if (err < 0) 116 goto out_bad; 117 } 118 119 if (unlikely(*p != end)) 120 goto bad; 121 return 0; 122 123 bad: 124 err = -EIO; 125 out_bad: 126 pr_err("problem parsing mds trace %d\n", err); 127 return err; 128 } 129 130 /* 131 * parse readdir results 132 */ 133 static int parse_reply_info_dir(void **p, void *end, 134 struct ceph_mds_reply_info_parsed *info) 135 { 136 u32 num, i = 0; 137 int err; 138 139 info->dir_dir = *p; 140 if (*p + sizeof(*info->dir_dir) > end) 141 goto bad; 142 *p += sizeof(*info->dir_dir) + 143 sizeof(u32)*le32_to_cpu(info->dir_dir->ndist); 144 if (*p > end) 145 goto bad; 146 147 ceph_decode_need(p, end, sizeof(num) + 2, bad); 148 num = ceph_decode_32(p); 149 info->dir_end = ceph_decode_8(p); 150 info->dir_complete = ceph_decode_8(p); 151 if (num == 0) 152 goto done; 153 154 /* alloc large array */ 155 info->dir_nr = num; 156 info->dir_in = kcalloc(num, sizeof(*info->dir_in) + 157 sizeof(*info->dir_dname) + 158 sizeof(*info->dir_dname_len) + 159 sizeof(*info->dir_dlease), 160 GFP_NOFS); 161 if (info->dir_in == NULL) { 162 err = -ENOMEM; 163 goto out_bad; 164 } 165 info->dir_dname = (void *)(info->dir_in + num); 166 info->dir_dname_len = (void *)(info->dir_dname + num); 167 info->dir_dlease = (void *)(info->dir_dname_len + num); 168 169 while (num) { 170 /* dentry */ 171 ceph_decode_need(p, end, sizeof(u32)*2, bad); 172 info->dir_dname_len[i] = ceph_decode_32(p); 173 ceph_decode_need(p, end, info->dir_dname_len[i], bad); 174 info->dir_dname[i] = *p; 175 *p += info->dir_dname_len[i]; 176 dout("parsed dir dname '%.*s'\n", info->dir_dname_len[i], 177 info->dir_dname[i]); 178 info->dir_dlease[i] = *p; 179 *p += sizeof(struct ceph_mds_reply_lease); 180 181 /* inode */ 182 err = parse_reply_info_in(p, end, &info->dir_in[i]); 183 if (err < 0) 184 goto out_bad; 185 i++; 186 num--; 187 } 188 189 done: 190 if (*p != end) 191 goto bad; 192 return 0; 193 194 bad: 195 err = -EIO; 196 out_bad: 197 pr_err("problem parsing dir contents %d\n", err); 198 return err; 199 } 200 201 /* 202 * parse entire mds reply 203 */ 204 static int parse_reply_info(struct ceph_msg *msg, 205 struct ceph_mds_reply_info_parsed *info) 206 { 207 void *p, *end; 208 u32 len; 209 int err; 210 211 info->head = msg->front.iov_base; 212 p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head); 213 end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head); 214 215 /* trace */ 216 ceph_decode_32_safe(&p, end, len, bad); 217 if (len > 0) { 218 err = parse_reply_info_trace(&p, p+len, info); 219 if (err < 0) 220 goto out_bad; 221 } 222 223 /* dir content */ 224 ceph_decode_32_safe(&p, end, len, bad); 225 if (len > 0) { 226 err = parse_reply_info_dir(&p, p+len, info); 227 if (err < 0) 228 goto out_bad; 229 } 230 231 /* snap blob */ 232 
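/* the snap blob is not decoded here; it is kept as an opaque byte range and later handed to ceph_update_snap_trace() from handle_reply() */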
ceph_decode_32_safe(&p, end, len, bad); 233 info->snapblob_len = len; 234 info->snapblob = p; 235 p += len; 236 237 if (p != end) 238 goto bad; 239 return 0; 240 241 bad: 242 err = -EIO; 243 out_bad: 244 pr_err("mds parse_reply err %d\n", err); 245 return err; 246 } 247 248 static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info) 249 { 250 kfree(info->dir_in); 251 } 252 253 254 /* 255 * sessions 256 */ 257 static const char *session_state_name(int s) 258 { 259 switch (s) { 260 case CEPH_MDS_SESSION_NEW: return "new"; 261 case CEPH_MDS_SESSION_OPENING: return "opening"; 262 case CEPH_MDS_SESSION_OPEN: return "open"; 263 case CEPH_MDS_SESSION_HUNG: return "hung"; 264 case CEPH_MDS_SESSION_CLOSING: return "closing"; 265 case CEPH_MDS_SESSION_RESTARTING: return "restarting"; 266 case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting"; 267 default: return "???"; 268 } 269 } 270 271 static struct ceph_mds_session *get_session(struct ceph_mds_session *s) 272 { 273 if (atomic_inc_not_zero(&s->s_ref)) { 274 dout("mdsc get_session %p %d -> %d\n", s, 275 atomic_read(&s->s_ref)-1, atomic_read(&s->s_ref)); 276 return s; 277 } else { 278 dout("mdsc get_session %p 0 -- FAIL", s); 279 return NULL; 280 } 281 } 282 283 void ceph_put_mds_session(struct ceph_mds_session *s) 284 { 285 dout("mdsc put_session %p %d -> %d\n", s, 286 atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1); 287 if (atomic_dec_and_test(&s->s_ref)) { 288 if (s->s_authorizer) 289 s->s_mdsc->client->monc.auth->ops->destroy_authorizer( 290 s->s_mdsc->client->monc.auth, s->s_authorizer); 291 kfree(s); 292 } 293 } 294 295 /* 296 * called under mdsc->mutex 297 */ 298 struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc, 299 int mds) 300 { 301 struct ceph_mds_session *session; 302 303 if (mds >= mdsc->max_sessions || mdsc->sessions[mds] == NULL) 304 return NULL; 305 session = mdsc->sessions[mds]; 306 dout("lookup_mds_session %p %d\n", session, 307 atomic_read(&session->s_ref)); 308 get_session(session); 309 return session; 310 } 311 312 static bool __have_session(struct ceph_mds_client *mdsc, int mds) 313 { 314 if (mds >= mdsc->max_sessions) 315 return false; 316 return mdsc->sessions[mds]; 317 } 318 319 static int __verify_registered_session(struct ceph_mds_client *mdsc, 320 struct ceph_mds_session *s) 321 { 322 if (s->s_mds >= mdsc->max_sessions || 323 mdsc->sessions[s->s_mds] != s) 324 return -ENOENT; 325 return 0; 326 } 327 328 /* 329 * create+register a new session for given mds. 330 * called under mdsc->mutex. 
331 */ 332 static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc, 333 int mds) 334 { 335 struct ceph_mds_session *s; 336 337 s = kzalloc(sizeof(*s), GFP_NOFS); 338 if (!s) 339 return ERR_PTR(-ENOMEM); 340 s->s_mdsc = mdsc; 341 s->s_mds = mds; 342 s->s_state = CEPH_MDS_SESSION_NEW; 343 s->s_ttl = 0; 344 s->s_seq = 0; 345 mutex_init(&s->s_mutex); 346 347 ceph_con_init(mdsc->client->msgr, &s->s_con); 348 s->s_con.private = s; 349 s->s_con.ops = &mds_con_ops; 350 s->s_con.peer_name.type = CEPH_ENTITY_TYPE_MDS; 351 s->s_con.peer_name.num = cpu_to_le64(mds); 352 353 spin_lock_init(&s->s_cap_lock); 354 s->s_cap_gen = 0; 355 s->s_cap_ttl = 0; 356 s->s_renew_requested = 0; 357 s->s_renew_seq = 0; 358 INIT_LIST_HEAD(&s->s_caps); 359 s->s_nr_caps = 0; 360 s->s_trim_caps = 0; 361 atomic_set(&s->s_ref, 1); 362 INIT_LIST_HEAD(&s->s_waiting); 363 INIT_LIST_HEAD(&s->s_unsafe); 364 s->s_num_cap_releases = 0; 365 s->s_cap_iterator = NULL; 366 INIT_LIST_HEAD(&s->s_cap_releases); 367 INIT_LIST_HEAD(&s->s_cap_releases_done); 368 INIT_LIST_HEAD(&s->s_cap_flushing); 369 INIT_LIST_HEAD(&s->s_cap_snaps_flushing); 370 371 dout("register_session mds%d\n", mds); 372 if (mds >= mdsc->max_sessions) { 373 int newmax = 1 << get_count_order(mds+1); 374 struct ceph_mds_session **sa; 375 376 dout("register_session realloc to %d\n", newmax); 377 sa = kcalloc(newmax, sizeof(void *), GFP_NOFS); 378 if (sa == NULL) 379 goto fail_realloc; 380 if (mdsc->sessions) { 381 memcpy(sa, mdsc->sessions, 382 mdsc->max_sessions * sizeof(void *)); 383 kfree(mdsc->sessions); 384 } 385 mdsc->sessions = sa; 386 mdsc->max_sessions = newmax; 387 } 388 mdsc->sessions[mds] = s; 389 atomic_inc(&s->s_ref); /* one ref to sessions[], one to caller */ 390 391 ceph_con_open(&s->s_con, ceph_mdsmap_get_addr(mdsc->mdsmap, mds)); 392 393 return s; 394 395 fail_realloc: 396 kfree(s); 397 return ERR_PTR(-ENOMEM); 398 } 399 400 /* 401 * called under mdsc->mutex 402 */ 403 static void __unregister_session(struct ceph_mds_client *mdsc, 404 struct ceph_mds_session *s) 405 { 406 dout("__unregister_session mds%d %p\n", s->s_mds, s); 407 BUG_ON(mdsc->sessions[s->s_mds] != s); 408 mdsc->sessions[s->s_mds] = NULL; 409 ceph_con_close(&s->s_con); 410 ceph_put_mds_session(s); 411 } 412 413 /* 414 * drop session refs in request. 
415 * 416 * should be last request ref, or hold mdsc->mutex 417 */ 418 static void put_request_session(struct ceph_mds_request *req) 419 { 420 if (req->r_session) { 421 ceph_put_mds_session(req->r_session); 422 req->r_session = NULL; 423 } 424 } 425 426 void ceph_mdsc_release_request(struct kref *kref) 427 { 428 struct ceph_mds_request *req = container_of(kref, 429 struct ceph_mds_request, 430 r_kref); 431 if (req->r_request) 432 ceph_msg_put(req->r_request); 433 if (req->r_reply) { 434 ceph_msg_put(req->r_reply); 435 destroy_reply_info(&req->r_reply_info); 436 } 437 if (req->r_inode) { 438 ceph_put_cap_refs(ceph_inode(req->r_inode), 439 CEPH_CAP_PIN); 440 iput(req->r_inode); 441 } 442 if (req->r_locked_dir) 443 ceph_put_cap_refs(ceph_inode(req->r_locked_dir), 444 CEPH_CAP_PIN); 445 if (req->r_target_inode) 446 iput(req->r_target_inode); 447 if (req->r_dentry) 448 dput(req->r_dentry); 449 if (req->r_old_dentry) { 450 ceph_put_cap_refs( 451 ceph_inode(req->r_old_dentry->d_parent->d_inode), 452 CEPH_CAP_PIN); 453 dput(req->r_old_dentry); 454 } 455 kfree(req->r_path1); 456 kfree(req->r_path2); 457 put_request_session(req); 458 ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation); 459 kfree(req); 460 } 461 462 /* 463 * lookup request, bump ref if found. 464 * 465 * called under mdsc->mutex. 466 */ 467 static struct ceph_mds_request *__lookup_request(struct ceph_mds_client *mdsc, 468 u64 tid) 469 { 470 struct ceph_mds_request *req; 471 struct rb_node *n = mdsc->request_tree.rb_node; 472 473 while (n) { 474 req = rb_entry(n, struct ceph_mds_request, r_node); 475 if (tid < req->r_tid) 476 n = n->rb_left; 477 else if (tid > req->r_tid) 478 n = n->rb_right; 479 else { 480 ceph_mdsc_get_request(req); 481 return req; 482 } 483 } 484 return NULL; 485 } 486 487 static void __insert_request(struct ceph_mds_client *mdsc, 488 struct ceph_mds_request *new) 489 { 490 struct rb_node **p = &mdsc->request_tree.rb_node; 491 struct rb_node *parent = NULL; 492 struct ceph_mds_request *req = NULL; 493 494 while (*p) { 495 parent = *p; 496 req = rb_entry(parent, struct ceph_mds_request, r_node); 497 if (new->r_tid < req->r_tid) 498 p = &(*p)->rb_left; 499 else if (new->r_tid > req->r_tid) 500 p = &(*p)->rb_right; 501 else 502 BUG(); 503 } 504 505 rb_link_node(&new->r_node, parent, p); 506 rb_insert_color(&new->r_node, &mdsc->request_tree); 507 } 508 509 /* 510 * Register an in-flight request, and assign a tid. Link to the directory 511 * we are modifying (if any). 512 * 513 * Called under mdsc->mutex.
514 */ 515 static void __register_request(struct ceph_mds_client *mdsc, 516 struct ceph_mds_request *req, 517 struct inode *dir) 518 { 519 req->r_tid = ++mdsc->last_tid; 520 if (req->r_num_caps) 521 ceph_reserve_caps(mdsc, &req->r_caps_reservation, 522 req->r_num_caps); 523 dout("__register_request %p tid %lld\n", req, req->r_tid); 524 ceph_mdsc_get_request(req); 525 __insert_request(mdsc, req); 526 527 if (dir) { 528 struct ceph_inode_info *ci = ceph_inode(dir); 529 530 spin_lock(&ci->i_unsafe_lock); 531 req->r_unsafe_dir = dir; 532 list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops); 533 spin_unlock(&ci->i_unsafe_lock); 534 } 535 } 536 537 static void __unregister_request(struct ceph_mds_client *mdsc, 538 struct ceph_mds_request *req) 539 { 540 dout("__unregister_request %p tid %lld\n", req, req->r_tid); 541 rb_erase(&req->r_node, &mdsc->request_tree); 542 RB_CLEAR_NODE(&req->r_node); 543 544 if (req->r_unsafe_dir) { 545 struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir); 546 547 spin_lock(&ci->i_unsafe_lock); 548 list_del_init(&req->r_unsafe_dir_item); 549 spin_unlock(&ci->i_unsafe_lock); 550 } 551 552 ceph_mdsc_put_request(req); 553 } 554 555 /* 556 * Choose mds to send request to next. If there is a hint set in the 557 * request (e.g., due to a prior forward hint from the mds), use that. 558 * Otherwise, consult frag tree and/or caps to identify the 559 * appropriate mds. If all else fails, choose randomly. 560 * 561 * Called under mdsc->mutex. 562 */ 563 static int __choose_mds(struct ceph_mds_client *mdsc, 564 struct ceph_mds_request *req) 565 { 566 struct inode *inode; 567 struct ceph_inode_info *ci; 568 struct ceph_cap *cap; 569 int mode = req->r_direct_mode; 570 int mds = -1; 571 u32 hash = req->r_direct_hash; 572 bool is_hash = req->r_direct_is_hash; 573 574 /* 575 * is there a specific mds we should try? ignore hint if we have 576 * no session and the mds is not up (active or recovering). 577 */ 578 if (req->r_resend_mds >= 0 && 579 (__have_session(mdsc, req->r_resend_mds) || 580 ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) { 581 dout("choose_mds using resend_mds mds%d\n", 582 req->r_resend_mds); 583 return req->r_resend_mds; 584 } 585 586 if (mode == USE_RANDOM_MDS) 587 goto random; 588 589 inode = NULL; 590 if (req->r_inode) { 591 inode = req->r_inode; 592 } else if (req->r_dentry) { 593 if (req->r_dentry->d_inode) { 594 inode = req->r_dentry->d_inode; 595 } else { 596 inode = req->r_dentry->d_parent->d_inode; 597 hash = req->r_dentry->d_name.hash; 598 is_hash = true; 599 } 600 } 601 dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash, 602 (int)hash, mode); 603 if (!inode) 604 goto random; 605 ci = ceph_inode(inode); 606 607 if (is_hash && S_ISDIR(inode->i_mode)) { 608 struct ceph_inode_frag frag; 609 int found; 610 611 ceph_choose_frag(ci, hash, &frag, &found); 612 if (found) { 613 if (mode == USE_ANY_MDS && frag.ndist > 0) { 614 u8 r; 615 616 /* choose a random replica */ 617 get_random_bytes(&r, 1); 618 r %= frag.ndist; 619 mds = frag.dist[r]; 620 dout("choose_mds %p %llx.%llx " 621 "frag %u mds%d (%d/%d)\n", 622 inode, ceph_vinop(inode), 623 frag.frag, frag.mds, 624 (int)r, frag.ndist); 625 return mds; 626 } 627 628 /* since this file/dir wasn't known to be 629 * replicated, then we want to look for the 630 * authoritative mds. 
*/ 631 mode = USE_AUTH_MDS; 632 if (frag.mds >= 0) { 633 /* choose auth mds */ 634 mds = frag.mds; 635 dout("choose_mds %p %llx.%llx " 636 "frag %u mds%d (auth)\n", 637 inode, ceph_vinop(inode), frag.frag, mds); 638 return mds; 639 } 640 } 641 } 642 643 spin_lock(&inode->i_lock); 644 cap = NULL; 645 if (mode == USE_AUTH_MDS) 646 cap = ci->i_auth_cap; 647 if (!cap && !RB_EMPTY_ROOT(&ci->i_caps)) 648 cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node); 649 if (!cap) { 650 spin_unlock(&inode->i_lock); 651 goto random; 652 } 653 mds = cap->session->s_mds; 654 dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n", 655 inode, ceph_vinop(inode), mds, 656 cap == ci->i_auth_cap ? "auth " : "", cap); 657 spin_unlock(&inode->i_lock); 658 return mds; 659 660 random: 661 mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap); 662 dout("choose_mds chose random mds%d\n", mds); 663 return mds; 664 } 665 666 667 /* 668 * session messages 669 */ 670 static struct ceph_msg *create_session_msg(u32 op, u64 seq) 671 { 672 struct ceph_msg *msg; 673 struct ceph_mds_session_head *h; 674 675 msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS); 676 if (!msg) { 677 pr_err("create_session_msg ENOMEM creating msg\n"); 678 return NULL; 679 } 680 h = msg->front.iov_base; 681 h->op = cpu_to_le32(op); 682 h->seq = cpu_to_le64(seq); 683 return msg; 684 } 685 686 /* 687 * send session open request. 688 * 689 * called under mdsc->mutex 690 */ 691 static int __open_session(struct ceph_mds_client *mdsc, 692 struct ceph_mds_session *session) 693 { 694 struct ceph_msg *msg; 695 int mstate; 696 int mds = session->s_mds; 697 698 /* wait for mds to go active? */ 699 mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds); 700 dout("open_session to mds%d (%s)\n", mds, 701 ceph_mds_state_name(mstate)); 702 session->s_state = CEPH_MDS_SESSION_OPENING; 703 session->s_renew_requested = jiffies; 704 705 /* send connect message */ 706 msg = create_session_msg(CEPH_SESSION_REQUEST_OPEN, session->s_seq); 707 if (!msg) 708 return -ENOMEM; 709 ceph_con_send(&session->s_con, msg); 710 return 0; 711 } 712 713 /* 714 * open sessions for any export targets for the given mds 715 * 716 * called under mdsc->mutex 717 */ 718 static void __open_export_target_sessions(struct ceph_mds_client *mdsc, 719 struct ceph_mds_session *session) 720 { 721 struct ceph_mds_info *mi; 722 struct ceph_mds_session *ts; 723 int i, mds = session->s_mds; 724 int target; 725 726 if (mds >= mdsc->mdsmap->m_max_mds) 727 return; 728 mi = &mdsc->mdsmap->m_info[mds]; 729 dout("open_export_target_sessions for mds%d (%d targets)\n", 730 session->s_mds, mi->num_export_targets); 731 732 for (i = 0; i < mi->num_export_targets; i++) { 733 target = mi->export_targets[i]; 734 ts = __ceph_lookup_mds_session(mdsc, target); 735 if (!ts) { 736 ts = register_session(mdsc, target); 737 if (IS_ERR(ts)) 738 return; 739 } 740 if (ts->s_state == CEPH_MDS_SESSION_NEW || 741 ts->s_state == CEPH_MDS_SESSION_CLOSING) 742 __open_session(mdsc, ts); 743 else 744 dout(" mds%d target mds%d %p is %s\n", session->s_mds, 745 target, ts, session_state_name(ts->s_state)); 746 ceph_put_mds_session(ts); 747 } 748 } 749 750 void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc, 751 struct ceph_mds_session *session) 752 { 753 mutex_lock(&mdsc->mutex); 754 __open_export_target_sessions(mdsc, session); 755 mutex_unlock(&mdsc->mutex); 756 } 757 758 /* 759 * session caps 760 */ 761 762 /* 763 * Free preallocated cap messages assigned to this session 764 */ 765 static void
cleanup_cap_releases(struct ceph_mds_session *session) 766 { 767 struct ceph_msg *msg; 768 769 spin_lock(&session->s_cap_lock); 770 while (!list_empty(&session->s_cap_releases)) { 771 msg = list_first_entry(&session->s_cap_releases, 772 struct ceph_msg, list_head); 773 list_del_init(&msg->list_head); 774 ceph_msg_put(msg); 775 } 776 while (!list_empty(&session->s_cap_releases_done)) { 777 msg = list_first_entry(&session->s_cap_releases_done, 778 struct ceph_msg, list_head); 779 list_del_init(&msg->list_head); 780 ceph_msg_put(msg); 781 } 782 spin_unlock(&session->s_cap_lock); 783 } 784 785 /* 786 * Helper to safely iterate over all caps associated with a session, with 787 * special care taken to handle a racing __ceph_remove_cap(). 788 * 789 * Caller must hold session s_mutex. 790 */ 791 static int iterate_session_caps(struct ceph_mds_session *session, 792 int (*cb)(struct inode *, struct ceph_cap *, 793 void *), void *arg) 794 { 795 struct list_head *p; 796 struct ceph_cap *cap; 797 struct inode *inode, *last_inode = NULL; 798 struct ceph_cap *old_cap = NULL; 799 int ret; 800 801 dout("iterate_session_caps %p mds%d\n", session, session->s_mds); 802 spin_lock(&session->s_cap_lock); 803 p = session->s_caps.next; 804 while (p != &session->s_caps) { 805 cap = list_entry(p, struct ceph_cap, session_caps); 806 inode = igrab(&cap->ci->vfs_inode); 807 if (!inode) { 808 p = p->next; 809 continue; 810 } 811 session->s_cap_iterator = cap; 812 spin_unlock(&session->s_cap_lock); 813 814 if (last_inode) { 815 iput(last_inode); 816 last_inode = NULL; 817 } 818 if (old_cap) { 819 ceph_put_cap(session->s_mdsc, old_cap); 820 old_cap = NULL; 821 } 822 823 ret = cb(inode, cap, arg); 824 last_inode = inode; 825 826 spin_lock(&session->s_cap_lock); 827 p = p->next; 828 if (cap->ci == NULL) { 829 dout("iterate_session_caps finishing cap %p removal\n", 830 cap); 831 BUG_ON(cap->session != session); 832 list_del_init(&cap->session_caps); 833 session->s_nr_caps--; 834 cap->session = NULL; 835 old_cap = cap; /* put_cap it w/o locks held */ 836 } 837 if (ret < 0) 838 goto out; 839 } 840 ret = 0; 841 out: 842 session->s_cap_iterator = NULL; 843 spin_unlock(&session->s_cap_lock); 844 845 if (last_inode) 846 iput(last_inode); 847 if (old_cap) 848 ceph_put_cap(session->s_mdsc, old_cap); 849 850 return ret; 851 } 852 853 static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap, 854 void *arg) 855 { 856 struct ceph_inode_info *ci = ceph_inode(inode); 857 int drop = 0; 858 859 dout("removing cap %p, ci is %p, inode is %p\n", 860 cap, ci, &ci->vfs_inode); 861 spin_lock(&inode->i_lock); 862 __ceph_remove_cap(cap); 863 if (!__ceph_is_any_real_caps(ci)) { 864 struct ceph_mds_client *mdsc = 865 &ceph_sb_to_client(inode->i_sb)->mdsc; 866 867 spin_lock(&mdsc->cap_dirty_lock); 868 if (!list_empty(&ci->i_dirty_item)) { 869 pr_info(" dropping dirty %s state for %p %lld\n", 870 ceph_cap_string(ci->i_dirty_caps), 871 inode, ceph_ino(inode)); 872 ci->i_dirty_caps = 0; 873 list_del_init(&ci->i_dirty_item); 874 drop = 1; 875 } 876 if (!list_empty(&ci->i_flushing_item)) { 877 pr_info(" dropping dirty+flushing %s state for %p %lld\n", 878 ceph_cap_string(ci->i_flushing_caps), 879 inode, ceph_ino(inode)); 880 ci->i_flushing_caps = 0; 881 list_del_init(&ci->i_flushing_item); 882 mdsc->num_cap_flushing--; 883 drop = 1; 884 } 885 if (drop && ci->i_wrbuffer_ref) { 886 pr_info(" dropping dirty data for %p %lld\n", 887 inode, ceph_ino(inode)); 888 ci->i_wrbuffer_ref = 0; 889 ci->i_wrbuffer_ref_head = 0; 890 drop++; 891 } 892 
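/* each reference counted in 'drop' is matched by an iput() below, once i_lock and cap_dirty_lock have been released */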
spin_unlock(&mdsc->cap_dirty_lock); 893 } 894 spin_unlock(&inode->i_lock); 895 while (drop--) 896 iput(inode); 897 return 0; 898 } 899 900 /* 901 * caller must hold session s_mutex 902 */ 903 static void remove_session_caps(struct ceph_mds_session *session) 904 { 905 dout("remove_session_caps on %p\n", session); 906 iterate_session_caps(session, remove_session_caps_cb, NULL); 907 BUG_ON(session->s_nr_caps > 0); 908 BUG_ON(!list_empty(&session->s_cap_flushing)); 909 cleanup_cap_releases(session); 910 } 911 912 /* 913 * wake up any threads waiting on this session's caps. if the cap is 914 * old (didn't get renewed on the client reconnect), remove it now. 915 * 916 * caller must hold s_mutex. 917 */ 918 static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap, 919 void *arg) 920 { 921 struct ceph_inode_info *ci = ceph_inode(inode); 922 923 wake_up_all(&ci->i_cap_wq); 924 if (arg) { 925 spin_lock(&inode->i_lock); 926 ci->i_wanted_max_size = 0; 927 ci->i_requested_max_size = 0; 928 spin_unlock(&inode->i_lock); 929 } 930 return 0; 931 } 932 933 static void wake_up_session_caps(struct ceph_mds_session *session, 934 int reconnect) 935 { 936 dout("wake_up_session_caps %p mds%d\n", session, session->s_mds); 937 iterate_session_caps(session, wake_up_session_cb, 938 (void *)(unsigned long)reconnect); 939 } 940 941 /* 942 * Send periodic message to MDS renewing all currently held caps. The 943 * ack will reset the expiration for all caps from this session. 944 * 945 * caller holds s_mutex 946 */ 947 static int send_renew_caps(struct ceph_mds_client *mdsc, 948 struct ceph_mds_session *session) 949 { 950 struct ceph_msg *msg; 951 int state; 952 953 if (time_after_eq(jiffies, session->s_cap_ttl) && 954 time_after_eq(session->s_cap_ttl, session->s_renew_requested)) 955 pr_info("mds%d caps stale\n", session->s_mds); 956 session->s_renew_requested = jiffies; 957 958 /* do not try to renew caps until a recovering mds has reconnected 959 * with its clients. */ 960 state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds); 961 if (state < CEPH_MDS_STATE_RECONNECT) { 962 dout("send_renew_caps ignoring mds%d (%s)\n", 963 session->s_mds, ceph_mds_state_name(state)); 964 return 0; 965 } 966 967 dout("send_renew_caps to mds%d (%s)\n", session->s_mds, 968 ceph_mds_state_name(state)); 969 msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS, 970 ++session->s_renew_seq); 971 if (!msg) 972 return -ENOMEM; 973 ceph_con_send(&session->s_con, msg); 974 return 0; 975 } 976 977 /* 978 * Note new cap ttl, and any transition from stale -> not stale (fresh?). 979 * 980 * Called under session->s_mutex 981 */ 982 static void renewed_caps(struct ceph_mds_client *mdsc, 983 struct ceph_mds_session *session, int is_renew) 984 { 985 int was_stale; 986 int wake = 0; 987 988 spin_lock(&session->s_cap_lock); 989 was_stale = is_renew && (session->s_cap_ttl == 0 || 990 time_after_eq(jiffies, session->s_cap_ttl)); 991 992 session->s_cap_ttl = session->s_renew_requested + 993 mdsc->mdsmap->m_session_timeout*HZ; 994 995 if (was_stale) { 996 if (time_before(jiffies, session->s_cap_ttl)) { 997 pr_info("mds%d caps renewed\n", session->s_mds); 998 wake = 1; 999 } else { 1000 pr_info("mds%d caps still stale\n", session->s_mds); 1001 } 1002 } 1003 dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n", 1004 session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh", 1005 time_before(jiffies, session->s_cap_ttl) ? 
"stale" : "fresh"); 1006 spin_unlock(&session->s_cap_lock); 1007 1008 if (wake) 1009 wake_up_session_caps(session, 0); 1010 } 1011 1012 /* 1013 * send a session close request 1014 */ 1015 static int request_close_session(struct ceph_mds_client *mdsc, 1016 struct ceph_mds_session *session) 1017 { 1018 struct ceph_msg *msg; 1019 1020 dout("request_close_session mds%d state %s seq %lld\n", 1021 session->s_mds, session_state_name(session->s_state), 1022 session->s_seq); 1023 msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq); 1024 if (!msg) 1025 return -ENOMEM; 1026 ceph_con_send(&session->s_con, msg); 1027 return 0; 1028 } 1029 1030 /* 1031 * Called with s_mutex held. 1032 */ 1033 static int __close_session(struct ceph_mds_client *mdsc, 1034 struct ceph_mds_session *session) 1035 { 1036 if (session->s_state >= CEPH_MDS_SESSION_CLOSING) 1037 return 0; 1038 session->s_state = CEPH_MDS_SESSION_CLOSING; 1039 return request_close_session(mdsc, session); 1040 } 1041 1042 /* 1043 * Trim old(er) caps. 1044 * 1045 * Because we can't cache an inode without one or more caps, we do 1046 * this indirectly: if a cap is unused, we prune its aliases, at which 1047 * point the inode will hopefully get dropped to. 1048 * 1049 * Yes, this is a bit sloppy. Our only real goal here is to respond to 1050 * memory pressure from the MDS, though, so it needn't be perfect. 1051 */ 1052 static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg) 1053 { 1054 struct ceph_mds_session *session = arg; 1055 struct ceph_inode_info *ci = ceph_inode(inode); 1056 int used, oissued, mine; 1057 1058 if (session->s_trim_caps <= 0) 1059 return -1; 1060 1061 spin_lock(&inode->i_lock); 1062 mine = cap->issued | cap->implemented; 1063 used = __ceph_caps_used(ci); 1064 oissued = __ceph_caps_issued_other(ci, cap); 1065 1066 dout("trim_caps_cb %p cap %p mine %s oissued %s used %s\n", 1067 inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued), 1068 ceph_cap_string(used)); 1069 if (ci->i_dirty_caps) 1070 goto out; /* dirty caps */ 1071 if ((used & ~oissued) & mine) 1072 goto out; /* we need these caps */ 1073 1074 session->s_trim_caps--; 1075 if (oissued) { 1076 /* we aren't the only cap.. just remove us */ 1077 __ceph_remove_cap(cap); 1078 } else { 1079 /* try to drop referring dentries */ 1080 spin_unlock(&inode->i_lock); 1081 d_prune_aliases(inode); 1082 dout("trim_caps_cb %p cap %p pruned, count now %d\n", 1083 inode, cap, atomic_read(&inode->i_count)); 1084 return 0; 1085 } 1086 1087 out: 1088 spin_unlock(&inode->i_lock); 1089 return 0; 1090 } 1091 1092 /* 1093 * Trim session cap count down to some max number. 1094 */ 1095 static int trim_caps(struct ceph_mds_client *mdsc, 1096 struct ceph_mds_session *session, 1097 int max_caps) 1098 { 1099 int trim_caps = session->s_nr_caps - max_caps; 1100 1101 dout("trim_caps mds%d start: %d / %d, trim %d\n", 1102 session->s_mds, session->s_nr_caps, max_caps, trim_caps); 1103 if (trim_caps > 0) { 1104 session->s_trim_caps = trim_caps; 1105 iterate_session_caps(session, trim_caps_cb, session); 1106 dout("trim_caps mds%d done: %d / %d, trimmed %d\n", 1107 session->s_mds, session->s_nr_caps, max_caps, 1108 trim_caps - session->s_trim_caps); 1109 session->s_trim_caps = 0; 1110 } 1111 return 0; 1112 } 1113 1114 /* 1115 * Allocate cap_release messages. If there is a partially full message 1116 * in the queue, try to allocate enough to cover it's remainder, so that 1117 * we can send it immediately. 1118 * 1119 * Called under s_mutex. 
1120 */ 1121 int ceph_add_cap_releases(struct ceph_mds_client *mdsc, 1122 struct ceph_mds_session *session) 1123 { 1124 struct ceph_msg *msg, *partial = NULL; 1125 struct ceph_mds_cap_release *head; 1126 int err = -ENOMEM; 1127 int extra = mdsc->client->mount_args->cap_release_safety; 1128 int num; 1129 1130 dout("add_cap_releases %p mds%d extra %d\n", session, session->s_mds, 1131 extra); 1132 1133 spin_lock(&session->s_cap_lock); 1134 1135 if (!list_empty(&session->s_cap_releases)) { 1136 msg = list_first_entry(&session->s_cap_releases, 1137 struct ceph_msg, 1138 list_head); 1139 head = msg->front.iov_base; 1140 num = le32_to_cpu(head->num); 1141 if (num) { 1142 dout(" partial %p with (%d/%d)\n", msg, num, 1143 (int)CEPH_CAPS_PER_RELEASE); 1144 extra += CEPH_CAPS_PER_RELEASE - num; 1145 partial = msg; 1146 } 1147 } 1148 while (session->s_num_cap_releases < session->s_nr_caps + extra) { 1149 spin_unlock(&session->s_cap_lock); 1150 msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, PAGE_CACHE_SIZE, 1151 GFP_NOFS); 1152 if (!msg) 1153 goto out_unlocked; 1154 dout("add_cap_releases %p msg %p now %d\n", session, msg, 1155 (int)msg->front.iov_len); 1156 head = msg->front.iov_base; 1157 head->num = cpu_to_le32(0); 1158 msg->front.iov_len = sizeof(*head); 1159 spin_lock(&session->s_cap_lock); 1160 list_add(&msg->list_head, &session->s_cap_releases); 1161 session->s_num_cap_releases += CEPH_CAPS_PER_RELEASE; 1162 } 1163 1164 if (partial) { 1165 head = partial->front.iov_base; 1166 num = le32_to_cpu(head->num); 1167 dout(" queueing partial %p with %d/%d\n", partial, num, 1168 (int)CEPH_CAPS_PER_RELEASE); 1169 list_move_tail(&partial->list_head, 1170 &session->s_cap_releases_done); 1171 session->s_num_cap_releases -= CEPH_CAPS_PER_RELEASE - num; 1172 } 1173 err = 0; 1174 spin_unlock(&session->s_cap_lock); 1175 out_unlocked: 1176 return err; 1177 } 1178 1179 /* 1180 * flush all dirty inode data to disk. 
1181 * 1182 * returns true if we've flushed through want_flush_seq 1183 */ 1184 static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq) 1185 { 1186 int mds, ret = 1; 1187 1188 dout("check_cap_flush want %lld\n", want_flush_seq); 1189 mutex_lock(&mdsc->mutex); 1190 for (mds = 0; ret && mds < mdsc->max_sessions; mds++) { 1191 struct ceph_mds_session *session = mdsc->sessions[mds]; 1192 1193 if (!session) 1194 continue; 1195 get_session(session); 1196 mutex_unlock(&mdsc->mutex); 1197 1198 mutex_lock(&session->s_mutex); 1199 if (!list_empty(&session->s_cap_flushing)) { 1200 struct ceph_inode_info *ci = 1201 list_entry(session->s_cap_flushing.next, 1202 struct ceph_inode_info, 1203 i_flushing_item); 1204 struct inode *inode = &ci->vfs_inode; 1205 1206 spin_lock(&inode->i_lock); 1207 if (ci->i_cap_flush_seq <= want_flush_seq) { 1208 dout("check_cap_flush still flushing %p " 1209 "seq %lld <= %lld to mds%d\n", inode, 1210 ci->i_cap_flush_seq, want_flush_seq, 1211 session->s_mds); 1212 ret = 0; 1213 } 1214 spin_unlock(&inode->i_lock); 1215 } 1216 mutex_unlock(&session->s_mutex); 1217 ceph_put_mds_session(session); 1218 1219 if (!ret) 1220 return ret; 1221 mutex_lock(&mdsc->mutex); 1222 } 1223 1224 mutex_unlock(&mdsc->mutex); 1225 dout("check_cap_flush ok, flushed thru %lld\n", want_flush_seq); 1226 return ret; 1227 } 1228 1229 /* 1230 * called under s_mutex 1231 */ 1232 void ceph_send_cap_releases(struct ceph_mds_client *mdsc, 1233 struct ceph_mds_session *session) 1234 { 1235 struct ceph_msg *msg; 1236 1237 dout("send_cap_releases mds%d\n", session->s_mds); 1238 spin_lock(&session->s_cap_lock); 1239 while (!list_empty(&session->s_cap_releases_done)) { 1240 msg = list_first_entry(&session->s_cap_releases_done, 1241 struct ceph_msg, list_head); 1242 list_del_init(&msg->list_head); 1243 spin_unlock(&session->s_cap_lock); 1244 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); 1245 dout("send_cap_releases mds%d %p\n", session->s_mds, msg); 1246 ceph_con_send(&session->s_con, msg); 1247 spin_lock(&session->s_cap_lock); 1248 } 1249 spin_unlock(&session->s_cap_lock); 1250 } 1251 1252 static void discard_cap_releases(struct ceph_mds_client *mdsc, 1253 struct ceph_mds_session *session) 1254 { 1255 struct ceph_msg *msg; 1256 struct ceph_mds_cap_release *head; 1257 unsigned num; 1258 1259 dout("discard_cap_releases mds%d\n", session->s_mds); 1260 spin_lock(&session->s_cap_lock); 1261 1262 /* zero out the in-progress message */ 1263 msg = list_first_entry(&session->s_cap_releases, 1264 struct ceph_msg, list_head); 1265 head = msg->front.iov_base; 1266 num = le32_to_cpu(head->num); 1267 dout("discard_cap_releases mds%d %p %u\n", session->s_mds, msg, num); 1268 head->num = cpu_to_le32(0); 1269 session->s_num_cap_releases += num; 1270 1271 /* requeue completed messages */ 1272 while (!list_empty(&session->s_cap_releases_done)) { 1273 msg = list_first_entry(&session->s_cap_releases_done, 1274 struct ceph_msg, list_head); 1275 list_del_init(&msg->list_head); 1276 1277 head = msg->front.iov_base; 1278 num = le32_to_cpu(head->num); 1279 dout("discard_cap_releases mds%d %p %u\n", session->s_mds, msg, 1280 num); 1281 session->s_num_cap_releases += num; 1282 head->num = cpu_to_le32(0); 1283 msg->front.iov_len = sizeof(*head); 1284 list_add(&msg->list_head, &session->s_cap_releases); 1285 } 1286 1287 spin_unlock(&session->s_cap_lock); 1288 } 1289 1290 /* 1291 * requests 1292 */ 1293 1294 /* 1295 * Create an mds request. 
1296 */ 1297 struct ceph_mds_request * 1298 ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode) 1299 { 1300 struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS); 1301 1302 if (!req) 1303 return ERR_PTR(-ENOMEM); 1304 1305 mutex_init(&req->r_fill_mutex); 1306 req->r_mdsc = mdsc; 1307 req->r_started = jiffies; 1308 req->r_resend_mds = -1; 1309 INIT_LIST_HEAD(&req->r_unsafe_dir_item); 1310 req->r_fmode = -1; 1311 kref_init(&req->r_kref); 1312 INIT_LIST_HEAD(&req->r_wait); 1313 init_completion(&req->r_completion); 1314 init_completion(&req->r_safe_completion); 1315 INIT_LIST_HEAD(&req->r_unsafe_item); 1316 1317 req->r_op = op; 1318 req->r_direct_mode = mode; 1319 return req; 1320 } 1321 1322 /* 1323 * return oldest (lowest) request, tid in request tree, 0 if none. 1324 * 1325 * called under mdsc->mutex. 1326 */ 1327 static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc) 1328 { 1329 if (RB_EMPTY_ROOT(&mdsc->request_tree)) 1330 return NULL; 1331 return rb_entry(rb_first(&mdsc->request_tree), 1332 struct ceph_mds_request, r_node); 1333 } 1334 1335 static u64 __get_oldest_tid(struct ceph_mds_client *mdsc) 1336 { 1337 struct ceph_mds_request *req = __get_oldest_req(mdsc); 1338 1339 if (req) 1340 return req->r_tid; 1341 return 0; 1342 } 1343 1344 /* 1345 * Build a dentry's path. Allocate on heap; caller must kfree. Based 1346 * on build_path_from_dentry in fs/cifs/dir.c. 1347 * 1348 * If @stop_on_nosnap, generate path relative to the first non-snapped 1349 * inode. 1350 * 1351 * Encode hidden .snap dirs as a double /, i.e. 1352 * foo/.snap/bar -> foo//bar 1353 */ 1354 char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base, 1355 int stop_on_nosnap) 1356 { 1357 struct dentry *temp; 1358 char *path; 1359 int len, pos; 1360 1361 if (dentry == NULL) 1362 return ERR_PTR(-EINVAL); 1363 1364 retry: 1365 len = 0; 1366 for (temp = dentry; !IS_ROOT(temp);) { 1367 struct inode *inode = temp->d_inode; 1368 if (inode && ceph_snap(inode) == CEPH_SNAPDIR) 1369 len++; /* slash only */ 1370 else if (stop_on_nosnap && inode && 1371 ceph_snap(inode) == CEPH_NOSNAP) 1372 break; 1373 else 1374 len += 1 + temp->d_name.len; 1375 temp = temp->d_parent; 1376 if (temp == NULL) { 1377 pr_err("build_path corrupt dentry %p\n", dentry); 1378 return ERR_PTR(-EINVAL); 1379 } 1380 } 1381 if (len) 1382 len--; /* no leading '/' */ 1383 1384 path = kmalloc(len+1, GFP_NOFS); 1385 if (path == NULL) 1386 return ERR_PTR(-ENOMEM); 1387 pos = len; 1388 path[pos] = 0; /* trailing null */ 1389 for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) { 1390 struct inode *inode = temp->d_inode; 1391 1392 if (inode && ceph_snap(inode) == CEPH_SNAPDIR) { 1393 dout("build_path path+%d: %p SNAPDIR\n", 1394 pos, temp); 1395 } else if (stop_on_nosnap && inode && 1396 ceph_snap(inode) == CEPH_NOSNAP) { 1397 break; 1398 } else { 1399 pos -= temp->d_name.len; 1400 if (pos < 0) 1401 break; 1402 strncpy(path + pos, temp->d_name.name, 1403 temp->d_name.len); 1404 } 1405 if (pos) 1406 path[--pos] = '/'; 1407 temp = temp->d_parent; 1408 if (temp == NULL) { 1409 pr_err("build_path corrupt dentry\n"); 1410 kfree(path); 1411 return ERR_PTR(-EINVAL); 1412 } 1413 } 1414 if (pos != 0) { 1415 pr_err("build_path did not end path lookup where " 1416 "expected, namelen is %d, pos is %d\n", len, pos); 1417 /* presumably this is only possible if racing with a 1418 rename of one of the parent directories (we can not 1419 lock the dentries above us to prevent this, but 1420 retrying should be harmless) */ 1421 
kfree(path); 1422 goto retry; 1423 } 1424 1425 *base = ceph_ino(temp->d_inode); 1426 *plen = len; 1427 dout("build_path on %p %d built %llx '%.*s'\n", 1428 dentry, atomic_read(&dentry->d_count), *base, len, path); 1429 return path; 1430 } 1431 1432 static int build_dentry_path(struct dentry *dentry, 1433 const char **ppath, int *ppathlen, u64 *pino, 1434 int *pfreepath) 1435 { 1436 char *path; 1437 1438 if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP) { 1439 *pino = ceph_ino(dentry->d_parent->d_inode); 1440 *ppath = dentry->d_name.name; 1441 *ppathlen = dentry->d_name.len; 1442 return 0; 1443 } 1444 path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1); 1445 if (IS_ERR(path)) 1446 return PTR_ERR(path); 1447 *ppath = path; 1448 *pfreepath = 1; 1449 return 0; 1450 } 1451 1452 static int build_inode_path(struct inode *inode, 1453 const char **ppath, int *ppathlen, u64 *pino, 1454 int *pfreepath) 1455 { 1456 struct dentry *dentry; 1457 char *path; 1458 1459 if (ceph_snap(inode) == CEPH_NOSNAP) { 1460 *pino = ceph_ino(inode); 1461 *ppathlen = 0; 1462 return 0; 1463 } 1464 dentry = d_find_alias(inode); 1465 path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1); 1466 dput(dentry); 1467 if (IS_ERR(path)) 1468 return PTR_ERR(path); 1469 *ppath = path; 1470 *pfreepath = 1; 1471 return 0; 1472 } 1473 1474 /* 1475 * request arguments may be specified via an inode *, a dentry *, or 1476 * an explicit ino+path. 1477 */ 1478 static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry, 1479 const char *rpath, u64 rino, 1480 const char **ppath, int *pathlen, 1481 u64 *ino, int *freepath) 1482 { 1483 int r = 0; 1484 1485 if (rinode) { 1486 r = build_inode_path(rinode, ppath, pathlen, ino, freepath); 1487 dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode), 1488 ceph_snap(rinode)); 1489 } else if (rdentry) { 1490 r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath); 1491 dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen, 1492 *ppath); 1493 } else if (rpath) { 1494 *ino = rino; 1495 *ppath = rpath; 1496 *pathlen = strlen(rpath); 1497 dout(" path %.*s\n", *pathlen, rpath); 1498 } 1499 1500 return r; 1501 } 1502 1503 /* 1504 * called under mdsc->mutex 1505 */ 1506 static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, 1507 struct ceph_mds_request *req, 1508 int mds) 1509 { 1510 struct ceph_msg *msg; 1511 struct ceph_mds_request_head *head; 1512 const char *path1 = NULL; 1513 const char *path2 = NULL; 1514 u64 ino1 = 0, ino2 = 0; 1515 int pathlen1 = 0, pathlen2 = 0; 1516 int freepath1 = 0, freepath2 = 0; 1517 int len; 1518 u16 releases; 1519 void *p, *end; 1520 int ret; 1521 1522 ret = set_request_path_attr(req->r_inode, req->r_dentry, 1523 req->r_path1, req->r_ino1.ino, 1524 &path1, &pathlen1, &ino1, &freepath1); 1525 if (ret < 0) { 1526 msg = ERR_PTR(ret); 1527 goto out; 1528 } 1529 1530 ret = set_request_path_attr(NULL, req->r_old_dentry, 1531 req->r_path2, req->r_ino2.ino, 1532 &path2, &pathlen2, &ino2, &freepath2); 1533 if (ret < 0) { 1534 msg = ERR_PTR(ret); 1535 goto out_free1; 1536 } 1537 1538 len = sizeof(*head) + 1539 pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)); 1540 1541 /* calculate (max) length for cap releases */ 1542 len += sizeof(struct ceph_mds_request_release) * 1543 (!!req->r_inode_drop + !!req->r_dentry_drop + 1544 !!req->r_old_inode_drop + !!req->r_old_dentry_drop); 1545 if (req->r_dentry_drop) 1546 len += req->r_dentry->d_name.len; 1547 if (req->r_old_dentry_drop) 1548 len += req->r_old_dentry->d_name.len; 1549 
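/* the message front will hold the request head, the two encoded filepaths, and at most the cap/dentry releases accounted for in 'len' above */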
1550 msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, GFP_NOFS); 1551 if (!msg) { 1552 msg = ERR_PTR(-ENOMEM); 1553 goto out_free2; 1554 } 1555 1556 msg->hdr.tid = cpu_to_le64(req->r_tid); 1557 1558 head = msg->front.iov_base; 1559 p = msg->front.iov_base + sizeof(*head); 1560 end = msg->front.iov_base + msg->front.iov_len; 1561 1562 head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch); 1563 head->op = cpu_to_le32(req->r_op); 1564 head->caller_uid = cpu_to_le32(current_fsuid()); 1565 head->caller_gid = cpu_to_le32(current_fsgid()); 1566 head->args = req->r_args; 1567 1568 ceph_encode_filepath(&p, end, ino1, path1); 1569 ceph_encode_filepath(&p, end, ino2, path2); 1570 1571 /* make note of release offset, in case we need to replay */ 1572 req->r_request_release_offset = p - msg->front.iov_base; 1573 1574 /* cap releases */ 1575 releases = 0; 1576 if (req->r_inode_drop) 1577 releases += ceph_encode_inode_release(&p, 1578 req->r_inode ? req->r_inode : req->r_dentry->d_inode, 1579 mds, req->r_inode_drop, req->r_inode_unless, 0); 1580 if (req->r_dentry_drop) 1581 releases += ceph_encode_dentry_release(&p, req->r_dentry, 1582 mds, req->r_dentry_drop, req->r_dentry_unless); 1583 if (req->r_old_dentry_drop) 1584 releases += ceph_encode_dentry_release(&p, req->r_old_dentry, 1585 mds, req->r_old_dentry_drop, req->r_old_dentry_unless); 1586 if (req->r_old_inode_drop) 1587 releases += ceph_encode_inode_release(&p, 1588 req->r_old_dentry->d_inode, 1589 mds, req->r_old_inode_drop, req->r_old_inode_unless, 0); 1590 head->num_releases = cpu_to_le16(releases); 1591 1592 BUG_ON(p > end); 1593 msg->front.iov_len = p - msg->front.iov_base; 1594 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); 1595 1596 msg->pages = req->r_pages; 1597 msg->nr_pages = req->r_num_pages; 1598 msg->hdr.data_len = cpu_to_le32(req->r_data_len); 1599 msg->hdr.data_off = cpu_to_le16(0); 1600 1601 out_free2: 1602 if (freepath2) 1603 kfree((char *)path2); 1604 out_free1: 1605 if (freepath1) 1606 kfree((char *)path1); 1607 out: 1608 return msg; 1609 } 1610 1611 /* 1612 * called under mdsc->mutex if error, under no mutex if 1613 * success. 1614 */ 1615 static void complete_request(struct ceph_mds_client *mdsc, 1616 struct ceph_mds_request *req) 1617 { 1618 if (req->r_callback) 1619 req->r_callback(mdsc, req); 1620 else 1621 complete_all(&req->r_completion); 1622 } 1623 1624 /* 1625 * called under mdsc->mutex 1626 */ 1627 static int __prepare_send_request(struct ceph_mds_client *mdsc, 1628 struct ceph_mds_request *req, 1629 int mds) 1630 { 1631 struct ceph_mds_request_head *rhead; 1632 struct ceph_msg *msg; 1633 int flags = 0; 1634 1635 req->r_mds = mds; 1636 req->r_attempts++; 1637 if (req->r_inode) { 1638 struct ceph_cap *cap = 1639 ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds); 1640 1641 if (cap) 1642 req->r_sent_on_mseq = cap->mseq; 1643 else 1644 req->r_sent_on_mseq = -1; 1645 } 1646 dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req, 1647 req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts); 1648 1649 if (req->r_got_unsafe) { 1650 /* 1651 * Replay. Do not regenerate message (and rebuild 1652 * paths, etc.); just use the original message. 1653 * Rebuilding paths will break for renames because 1654 * d_move mangles the src name. 
1655 */ 1656 msg = req->r_request; 1657 rhead = msg->front.iov_base; 1658 1659 flags = le32_to_cpu(rhead->flags); 1660 flags |= CEPH_MDS_FLAG_REPLAY; 1661 rhead->flags = cpu_to_le32(flags); 1662 1663 if (req->r_target_inode) 1664 rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode)); 1665 1666 rhead->num_retry = req->r_attempts - 1; 1667 1668 /* remove cap/dentry releases from message */ 1669 rhead->num_releases = 0; 1670 msg->hdr.front_len = cpu_to_le32(req->r_request_release_offset); 1671 msg->front.iov_len = req->r_request_release_offset; 1672 return 0; 1673 } 1674 1675 if (req->r_request) { 1676 ceph_msg_put(req->r_request); 1677 req->r_request = NULL; 1678 } 1679 msg = create_request_message(mdsc, req, mds); 1680 if (IS_ERR(msg)) { 1681 req->r_err = PTR_ERR(msg); 1682 complete_request(mdsc, req); 1683 return PTR_ERR(msg); 1684 } 1685 req->r_request = msg; 1686 1687 rhead = msg->front.iov_base; 1688 rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc)); 1689 if (req->r_got_unsafe) 1690 flags |= CEPH_MDS_FLAG_REPLAY; 1691 if (req->r_locked_dir) 1692 flags |= CEPH_MDS_FLAG_WANT_DENTRY; 1693 rhead->flags = cpu_to_le32(flags); 1694 rhead->num_fwd = req->r_num_fwd; 1695 rhead->num_retry = req->r_attempts - 1; 1696 rhead->ino = 0; 1697 1698 dout(" r_locked_dir = %p\n", req->r_locked_dir); 1699 return 0; 1700 } 1701 1702 /* 1703 * send request, or put it on the appropriate wait list. 1704 */ 1705 static int __do_request(struct ceph_mds_client *mdsc, 1706 struct ceph_mds_request *req) 1707 { 1708 struct ceph_mds_session *session = NULL; 1709 int mds = -1; 1710 int err = -EAGAIN; 1711 1712 if (req->r_err || req->r_got_result) 1713 goto out; 1714 1715 if (req->r_timeout && 1716 time_after_eq(jiffies, req->r_started + req->r_timeout)) { 1717 dout("do_request timed out\n"); 1718 err = -EIO; 1719 goto finish; 1720 } 1721 1722 mds = __choose_mds(mdsc, req); 1723 if (mds < 0 || 1724 ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) { 1725 dout("do_request no mds or not active, waiting for map\n"); 1726 list_add(&req->r_wait, &mdsc->waiting_for_map); 1727 goto out; 1728 } 1729 1730 /* get, open session */ 1731 session = __ceph_lookup_mds_session(mdsc, mds); 1732 if (!session) { 1733 session = register_session(mdsc, mds); 1734 if (IS_ERR(session)) { 1735 err = PTR_ERR(session); 1736 goto finish; 1737 } 1738 } 1739 dout("do_request mds%d session %p state %s\n", mds, session, 1740 session_state_name(session->s_state)); 1741 if (session->s_state != CEPH_MDS_SESSION_OPEN && 1742 session->s_state != CEPH_MDS_SESSION_HUNG) { 1743 if (session->s_state == CEPH_MDS_SESSION_NEW || 1744 session->s_state == CEPH_MDS_SESSION_CLOSING) 1745 __open_session(mdsc, session); 1746 list_add(&req->r_wait, &session->s_waiting); 1747 goto out_session; 1748 } 1749 1750 /* send request */ 1751 req->r_session = get_session(session); 1752 req->r_resend_mds = -1; /* forget any previous mds hint */ 1753 1754 if (req->r_request_started == 0) /* note request start time */ 1755 req->r_request_started = jiffies; 1756 1757 err = __prepare_send_request(mdsc, req, mds); 1758 if (!err) { 1759 ceph_msg_get(req->r_request); 1760 ceph_con_send(&session->s_con, req->r_request); 1761 } 1762 1763 out_session: 1764 ceph_put_mds_session(session); 1765 out: 1766 return err; 1767 1768 finish: 1769 req->r_err = err; 1770 complete_request(mdsc, req); 1771 goto out; 1772 } 1773 1774 /* 1775 * called under mdsc->mutex 1776 */ 1777 static void __wake_requests(struct ceph_mds_client *mdsc, 1778 struct list_head *head) 1779 { 
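/* take each waiting request off the list and resubmit it via __do_request(); caller holds mdsc->mutex */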
1780 struct ceph_mds_request *req, *nreq; 1781 1782 list_for_each_entry_safe(req, nreq, head, r_wait) { 1783 list_del_init(&req->r_wait); 1784 __do_request(mdsc, req); 1785 } 1786 } 1787 1788 /* 1789 * Wake up threads with requests pending for @mds, so that they can 1790 * resubmit their requests to a possibly different mds. 1791 */ 1792 static void kick_requests(struct ceph_mds_client *mdsc, int mds) 1793 { 1794 struct ceph_mds_request *req; 1795 struct rb_node *p; 1796 1797 dout("kick_requests mds%d\n", mds); 1798 for (p = rb_first(&mdsc->request_tree); p; p = rb_next(p)) { 1799 req = rb_entry(p, struct ceph_mds_request, r_node); 1800 if (req->r_got_unsafe) 1801 continue; 1802 if (req->r_session && 1803 req->r_session->s_mds == mds) { 1804 dout(" kicking tid %llu\n", req->r_tid); 1805 put_request_session(req); 1806 __do_request(mdsc, req); 1807 } 1808 } 1809 } 1810 1811 void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, 1812 struct ceph_mds_request *req) 1813 { 1814 dout("submit_request on %p\n", req); 1815 mutex_lock(&mdsc->mutex); 1816 __register_request(mdsc, req, NULL); 1817 __do_request(mdsc, req); 1818 mutex_unlock(&mdsc->mutex); 1819 } 1820 1821 /* 1822 * Synchronously perform an mds request. Take care of all of the 1823 * session setup, forwarding, and retry details. 1824 */ 1825 int ceph_mdsc_do_request(struct ceph_mds_client *mdsc, 1826 struct inode *dir, 1827 struct ceph_mds_request *req) 1828 { 1829 int err; 1830 1831 dout("do_request on %p\n", req); 1832 1833 /* take CAP_PIN refs for r_inode, r_locked_dir, r_old_dentry */ 1834 if (req->r_inode) 1835 ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN); 1836 if (req->r_locked_dir) 1837 ceph_get_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN); 1838 if (req->r_old_dentry) 1839 ceph_get_cap_refs( 1840 ceph_inode(req->r_old_dentry->d_parent->d_inode), 1841 CEPH_CAP_PIN); 1842 1843 /* issue */ 1844 mutex_lock(&mdsc->mutex); 1845 __register_request(mdsc, req, dir); 1846 __do_request(mdsc, req); 1847 1848 if (req->r_err) { 1849 err = req->r_err; 1850 __unregister_request(mdsc, req); 1851 dout("do_request early error %d\n", err); 1852 goto out; 1853 } 1854 1855 /* wait */ 1856 mutex_unlock(&mdsc->mutex); 1857 dout("do_request waiting\n"); 1858 if (req->r_timeout) { 1859 err = (long)wait_for_completion_killable_timeout( 1860 &req->r_completion, req->r_timeout); 1861 if (err == 0) 1862 err = -EIO; 1863 } else { 1864 err = wait_for_completion_killable(&req->r_completion); 1865 } 1866 dout("do_request waited, got %d\n", err); 1867 mutex_lock(&mdsc->mutex); 1868 1869 /* only abort if we didn't race with a real reply */ 1870 if (req->r_got_result) { 1871 err = le32_to_cpu(req->r_reply_info.head->result); 1872 } else if (err < 0) { 1873 dout("aborted request %lld with %d\n", req->r_tid, err); 1874 1875 /* 1876 * ensure we aren't running concurrently with 1877 * ceph_fill_trace or ceph_readdir_prepopulate, which 1878 * rely on locks (dir mutex) held by our caller. 1879 */ 1880 mutex_lock(&req->r_fill_mutex); 1881 req->r_err = err; 1882 req->r_aborted = true; 1883 mutex_unlock(&req->r_fill_mutex); 1884 1885 if (req->r_locked_dir && 1886 (req->r_op & CEPH_MDS_OP_WRITE)) 1887 ceph_invalidate_dir_request(req); 1888 } else { 1889 err = req->r_err; 1890 } 1891 1892 out: 1893 mutex_unlock(&mdsc->mutex); 1894 dout("do_request %p done, result %d\n", req, err); 1895 return err; 1896 } 1897 1898 /* 1899 * Invalidate dir I_COMPLETE, dentry lease state on an aborted MDS 1900 * namespace request.
1901 */ 1902 void ceph_invalidate_dir_request(struct ceph_mds_request *req) 1903 { 1904 struct inode *inode = req->r_locked_dir; 1905 struct ceph_inode_info *ci = ceph_inode(inode); 1906 1907 dout("invalidate_dir_request %p (I_COMPLETE, lease(s))\n", inode); 1908 spin_lock(&inode->i_lock); 1909 ci->i_ceph_flags &= ~CEPH_I_COMPLETE; 1910 ci->i_release_count++; 1911 spin_unlock(&inode->i_lock); 1912 1913 if (req->r_dentry) 1914 ceph_invalidate_dentry_lease(req->r_dentry); 1915 if (req->r_old_dentry) 1916 ceph_invalidate_dentry_lease(req->r_old_dentry); 1917 } 1918 1919 /* 1920 * Handle mds reply. 1921 * 1922 * We take the session mutex and parse and process the reply immediately. 1923 * This preserves the logical ordering of replies, capabilities, etc., sent 1924 * by the MDS as they are applied to our local cache. 1925 */ 1926 static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) 1927 { 1928 struct ceph_mds_client *mdsc = session->s_mdsc; 1929 struct ceph_mds_request *req; 1930 struct ceph_mds_reply_head *head = msg->front.iov_base; 1931 struct ceph_mds_reply_info_parsed *rinfo; /* parsed reply info */ 1932 u64 tid; 1933 int err, result; 1934 int mds = session->s_mds; 1935 1936 if (msg->front.iov_len < sizeof(*head)) { 1937 pr_err("mdsc_handle_reply got corrupt (short) reply\n"); 1938 ceph_msg_dump(msg); 1939 return; 1940 } 1941 1942 /* get request, session */ 1943 tid = le64_to_cpu(msg->hdr.tid); 1944 mutex_lock(&mdsc->mutex); 1945 req = __lookup_request(mdsc, tid); 1946 if (!req) { 1947 dout("handle_reply on unknown tid %llu\n", tid); 1948 mutex_unlock(&mdsc->mutex); 1949 return; 1950 } 1951 dout("handle_reply %p\n", req); 1952 1953 /* correct session? */ 1954 if (req->r_session != session) { 1955 pr_err("mdsc_handle_reply got %llu on session mds%d" 1956 " not mds%d\n", tid, session->s_mds, 1957 req->r_session ? req->r_session->s_mds : -1); 1958 mutex_unlock(&mdsc->mutex); 1959 goto out; 1960 } 1961 1962 /* dup? */ 1963 if ((req->r_got_unsafe && !head->safe) || 1964 (req->r_got_safe && head->safe)) { 1965 pr_warning("got a dup %s reply on %llu from mds%d\n", 1966 head->safe ? 
"safe" : "unsafe", tid, mds); 1967 mutex_unlock(&mdsc->mutex); 1968 goto out; 1969 } 1970 if (req->r_got_safe && !head->safe) { 1971 pr_warning("got unsafe after safe on %llu from mds%d\n", 1972 tid, mds); 1973 mutex_unlock(&mdsc->mutex); 1974 goto out; 1975 } 1976 1977 result = le32_to_cpu(head->result); 1978 1979 /* 1980 * Handle an ESTALE 1981 * if we're not talking to the authority, send to them 1982 * if the authority has changed while we weren't looking, 1983 * send to new authority 1984 * Otherwise we just have to return an ESTALE 1985 */ 1986 if (result == -ESTALE) { 1987 dout("got ESTALE on request %llu", req->r_tid); 1988 if (!req->r_inode) { 1989 /* do nothing; not an authority problem */ 1990 } else if (req->r_direct_mode != USE_AUTH_MDS) { 1991 dout("not using auth, setting for that now"); 1992 req->r_direct_mode = USE_AUTH_MDS; 1993 __do_request(mdsc, req); 1994 mutex_unlock(&mdsc->mutex); 1995 goto out; 1996 } else { 1997 struct ceph_inode_info *ci = ceph_inode(req->r_inode); 1998 struct ceph_cap *cap = 1999 ceph_get_cap_for_mds(ci, req->r_mds);; 2000 2001 dout("already using auth"); 2002 if ((!cap || cap != ci->i_auth_cap) || 2003 (cap->mseq != req->r_sent_on_mseq)) { 2004 dout("but cap changed, so resending"); 2005 __do_request(mdsc, req); 2006 mutex_unlock(&mdsc->mutex); 2007 goto out; 2008 } 2009 } 2010 dout("have to return ESTALE on request %llu", req->r_tid); 2011 } 2012 2013 2014 if (head->safe) { 2015 req->r_got_safe = true; 2016 __unregister_request(mdsc, req); 2017 complete_all(&req->r_safe_completion); 2018 2019 if (req->r_got_unsafe) { 2020 /* 2021 * We already handled the unsafe response, now do the 2022 * cleanup. No need to examine the response; the MDS 2023 * doesn't include any result info in the safe 2024 * response. And even if it did, there is nothing 2025 * useful we could do with a revised return value. 2026 */ 2027 dout("got safe reply %llu, mds%d\n", tid, mds); 2028 list_del_init(&req->r_unsafe_item); 2029 2030 /* last unsafe request during umount? 
*/ 2031 if (mdsc->stopping && !__get_oldest_req(mdsc)) 2032 complete_all(&mdsc->safe_umount_waiters); 2033 mutex_unlock(&mdsc->mutex); 2034 goto out; 2035 } 2036 } else { 2037 req->r_got_unsafe = true; 2038 list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe); 2039 } 2040 2041 dout("handle_reply tid %lld result %d\n", tid, result); 2042 rinfo = &req->r_reply_info; 2043 err = parse_reply_info(msg, rinfo); 2044 mutex_unlock(&mdsc->mutex); 2045 2046 mutex_lock(&session->s_mutex); 2047 if (err < 0) { 2048 pr_err("mdsc_handle_reply got corrupt reply mds%d\n", mds); 2049 ceph_msg_dump(msg); 2050 goto out_err; 2051 } 2052 2053 /* snap trace */ 2054 if (rinfo->snapblob_len) { 2055 down_write(&mdsc->snap_rwsem); 2056 ceph_update_snap_trace(mdsc, rinfo->snapblob, 2057 rinfo->snapblob + rinfo->snapblob_len, 2058 le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP); 2059 downgrade_write(&mdsc->snap_rwsem); 2060 } else { 2061 down_read(&mdsc->snap_rwsem); 2062 } 2063 2064 /* insert trace into our cache */ 2065 mutex_lock(&req->r_fill_mutex); 2066 err = ceph_fill_trace(mdsc->client->sb, req, req->r_session); 2067 if (err == 0) { 2068 if (result == 0 && rinfo->dir_nr) 2069 ceph_readdir_prepopulate(req, req->r_session); 2070 ceph_unreserve_caps(mdsc, &req->r_caps_reservation); 2071 } 2072 mutex_unlock(&req->r_fill_mutex); 2073 2074 up_read(&mdsc->snap_rwsem); 2075 out_err: 2076 mutex_lock(&mdsc->mutex); 2077 if (!req->r_aborted) { 2078 if (err) { 2079 req->r_err = err; 2080 } else { 2081 req->r_reply = msg; 2082 ceph_msg_get(msg); 2083 req->r_got_result = true; 2084 } 2085 } else { 2086 dout("reply arrived after request %lld was aborted\n", tid); 2087 } 2088 mutex_unlock(&mdsc->mutex); 2089 2090 ceph_add_cap_releases(mdsc, req->r_session); 2091 mutex_unlock(&session->s_mutex); 2092 2093 /* kick calling process */ 2094 complete_request(mdsc, req); 2095 out: 2096 ceph_mdsc_put_request(req); 2097 return; 2098 } 2099 2100 2101 2102 /* 2103 * handle mds notification that our request has been forwarded. 2104 */ 2105 static void handle_forward(struct ceph_mds_client *mdsc, 2106 struct ceph_mds_session *session, 2107 struct ceph_msg *msg) 2108 { 2109 struct ceph_mds_request *req; 2110 u64 tid = le64_to_cpu(msg->hdr.tid); 2111 u32 next_mds; 2112 u32 fwd_seq; 2113 int err = -EINVAL; 2114 void *p = msg->front.iov_base; 2115 void *end = p + msg->front.iov_len; 2116 2117 ceph_decode_need(&p, end, 2*sizeof(u32), bad); 2118 next_mds = ceph_decode_32(&p); 2119 fwd_seq = ceph_decode_32(&p); 2120 2121 mutex_lock(&mdsc->mutex); 2122 req = __lookup_request(mdsc, tid); 2123 if (!req) { 2124 dout("forward tid %llu to mds%d - req dne\n", tid, next_mds); 2125 goto out; /* dup reply? */ 2126 } 2127 2128 if (req->r_aborted) { 2129 dout("forward tid %llu aborted, unregistering\n", tid); 2130 __unregister_request(mdsc, req); 2131 } else if (fwd_seq <= req->r_num_fwd) { 2132 dout("forward tid %llu to mds%d - old seq %d <= %d\n", 2133 tid, next_mds, req->r_num_fwd, fwd_seq); 2134 } else { 2135 /* resend. 
forward race not possible; mds would drop */ 2136 dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds); 2137 BUG_ON(req->r_err); 2138 BUG_ON(req->r_got_result); 2139 req->r_num_fwd = fwd_seq; 2140 req->r_resend_mds = next_mds; 2141 put_request_session(req); 2142 __do_request(mdsc, req); 2143 } 2144 ceph_mdsc_put_request(req); 2145 out: 2146 mutex_unlock(&mdsc->mutex); 2147 return; 2148 2149 bad: 2150 pr_err("mdsc_handle_forward decode error err=%d\n", err); 2151 } 2152 2153 /* 2154 * handle a mds session control message 2155 */ 2156 static void handle_session(struct ceph_mds_session *session, 2157 struct ceph_msg *msg) 2158 { 2159 struct ceph_mds_client *mdsc = session->s_mdsc; 2160 u32 op; 2161 u64 seq; 2162 int mds = session->s_mds; 2163 struct ceph_mds_session_head *h = msg->front.iov_base; 2164 int wake = 0; 2165 2166 /* decode */ 2167 if (msg->front.iov_len != sizeof(*h)) 2168 goto bad; 2169 op = le32_to_cpu(h->op); 2170 seq = le64_to_cpu(h->seq); 2171 2172 mutex_lock(&mdsc->mutex); 2173 if (op == CEPH_SESSION_CLOSE) 2174 __unregister_session(mdsc, session); 2175 /* FIXME: this ttl calculation is generous */ 2176 session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose; 2177 mutex_unlock(&mdsc->mutex); 2178 2179 mutex_lock(&session->s_mutex); 2180 2181 dout("handle_session mds%d %s %p state %s seq %llu\n", 2182 mds, ceph_session_op_name(op), session, 2183 session_state_name(session->s_state), seq); 2184 2185 if (session->s_state == CEPH_MDS_SESSION_HUNG) { 2186 session->s_state = CEPH_MDS_SESSION_OPEN; 2187 pr_info("mds%d came back\n", session->s_mds); 2188 } 2189 2190 switch (op) { 2191 case CEPH_SESSION_OPEN: 2192 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING) 2193 pr_info("mds%d reconnect success\n", session->s_mds); 2194 session->s_state = CEPH_MDS_SESSION_OPEN; 2195 renewed_caps(mdsc, session, 0); 2196 wake = 1; 2197 if (mdsc->stopping) 2198 __close_session(mdsc, session); 2199 break; 2200 2201 case CEPH_SESSION_RENEWCAPS: 2202 if (session->s_renew_seq == seq) 2203 renewed_caps(mdsc, session, 1); 2204 break; 2205 2206 case CEPH_SESSION_CLOSE: 2207 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING) 2208 pr_info("mds%d reconnect denied\n", session->s_mds); 2209 remove_session_caps(session); 2210 wake = 1; /* for good measure */ 2211 complete_all(&mdsc->session_close_waiters); 2212 kick_requests(mdsc, mds); 2213 break; 2214 2215 case CEPH_SESSION_STALE: 2216 pr_info("mds%d caps went stale, renewing\n", 2217 session->s_mds); 2218 spin_lock(&session->s_cap_lock); 2219 session->s_cap_gen++; 2220 session->s_cap_ttl = 0; 2221 spin_unlock(&session->s_cap_lock); 2222 send_renew_caps(mdsc, session); 2223 break; 2224 2225 case CEPH_SESSION_RECALL_STATE: 2226 trim_caps(mdsc, session, le32_to_cpu(h->max_caps)); 2227 break; 2228 2229 default: 2230 pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds); 2231 WARN_ON(1); 2232 } 2233 2234 mutex_unlock(&session->s_mutex); 2235 if (wake) { 2236 mutex_lock(&mdsc->mutex); 2237 __wake_requests(mdsc, &session->s_waiting); 2238 mutex_unlock(&mdsc->mutex); 2239 } 2240 return; 2241 2242 bad: 2243 pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds, 2244 (int)msg->front.iov_len); 2245 ceph_msg_dump(msg); 2246 return; 2247 } 2248 2249 2250 /* 2251 * called under session->mutex. 
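 *
 * Resend every request on the session's s_unsafe list, i.e. requests
 * that received an unsafe reply but have not yet been committed by the
 * MDS.  Used from send_mds_reconnect() when reestablishing a session.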
2252 */ 2253 static void replay_unsafe_requests(struct ceph_mds_client *mdsc, 2254 struct ceph_mds_session *session) 2255 { 2256 struct ceph_mds_request *req, *nreq; 2257 int err; 2258 2259 dout("replay_unsafe_requests mds%d\n", session->s_mds); 2260 2261 mutex_lock(&mdsc->mutex); 2262 list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) { 2263 err = __prepare_send_request(mdsc, req, session->s_mds); 2264 if (!err) { 2265 ceph_msg_get(req->r_request); 2266 ceph_con_send(&session->s_con, req->r_request); 2267 } 2268 } 2269 mutex_unlock(&mdsc->mutex); 2270 } 2271 2272 /* 2273 * Encode information about a cap for a reconnect with the MDS. 2274 */ 2275 static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap, 2276 void *arg) 2277 { 2278 union { 2279 struct ceph_mds_cap_reconnect v2; 2280 struct ceph_mds_cap_reconnect_v1 v1; 2281 } rec; 2282 size_t reclen; 2283 struct ceph_inode_info *ci; 2284 struct ceph_reconnect_state *recon_state = arg; 2285 struct ceph_pagelist *pagelist = recon_state->pagelist; 2286 char *path; 2287 int pathlen, err; 2288 u64 pathbase; 2289 struct dentry *dentry; 2290 2291 ci = cap->ci; 2292 2293 dout(" adding %p ino %llx.%llx cap %p %lld %s\n", 2294 inode, ceph_vinop(inode), cap, cap->cap_id, 2295 ceph_cap_string(cap->issued)); 2296 err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode)); 2297 if (err) 2298 return err; 2299 2300 dentry = d_find_alias(inode); 2301 if (dentry) { 2302 path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0); 2303 if (IS_ERR(path)) { 2304 err = PTR_ERR(path); 2305 BUG_ON(err); 2306 } 2307 } else { 2308 path = NULL; 2309 pathlen = 0; 2310 } 2311 err = ceph_pagelist_encode_string(pagelist, path, pathlen); 2312 if (err) 2313 goto out; 2314 2315 spin_lock(&inode->i_lock); 2316 cap->seq = 0; /* reset cap seq */ 2317 cap->issue_seq = 0; /* and issue_seq */ 2318 2319 if (recon_state->flock) { 2320 rec.v2.cap_id = cpu_to_le64(cap->cap_id); 2321 rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci)); 2322 rec.v2.issued = cpu_to_le32(cap->issued); 2323 rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino); 2324 rec.v2.pathbase = cpu_to_le64(pathbase); 2325 rec.v2.flock_len = 0; 2326 reclen = sizeof(rec.v2); 2327 } else { 2328 rec.v1.cap_id = cpu_to_le64(cap->cap_id); 2329 rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci)); 2330 rec.v1.issued = cpu_to_le32(cap->issued); 2331 rec.v1.size = cpu_to_le64(inode->i_size); 2332 ceph_encode_timespec(&rec.v1.mtime, &inode->i_mtime); 2333 ceph_encode_timespec(&rec.v1.atime, &inode->i_atime); 2334 rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino); 2335 rec.v1.pathbase = cpu_to_le64(pathbase); 2336 reclen = sizeof(rec.v1); 2337 } 2338 spin_unlock(&inode->i_lock); 2339 2340 if (recon_state->flock) { 2341 int num_fcntl_locks, num_flock_locks; 2342 2343 lock_kernel(); 2344 ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks); 2345 rec.v2.flock_len = (2*sizeof(u32) + 2346 (num_fcntl_locks+num_flock_locks) * 2347 sizeof(struct ceph_filelock)); 2348 2349 err = ceph_pagelist_append(pagelist, &rec, reclen); 2350 if (!err) 2351 err = ceph_encode_locks(inode, pagelist, 2352 num_fcntl_locks, 2353 num_flock_locks); 2354 unlock_kernel(); 2355 } 2356 2357 out: 2358 kfree(path); 2359 dput(dentry); 2360 return err; 2361 } 2362 2363 2364 /* 2365 * If an MDS fails and recovers, clients need to reconnect in order to 2366 * reestablish shared state. This includes all caps issued through 2367 * this session _and_ the snap_realm hierarchy. 
Because it's not 2368 * clear which snap realms the mds cares about, we send everything we 2369 * know about.. that ensures we'll then get any new info the 2370 * recovering MDS might have. 2371 * 2372 * This is a relatively heavyweight operation, but it's rare. 2373 * 2374 * called with mdsc->mutex held. 2375 */ 2376 static void send_mds_reconnect(struct ceph_mds_client *mdsc, 2377 struct ceph_mds_session *session) 2378 { 2379 struct ceph_msg *reply; 2380 struct rb_node *p; 2381 int mds = session->s_mds; 2382 int err = -ENOMEM; 2383 struct ceph_pagelist *pagelist; 2384 struct ceph_reconnect_state recon_state; 2385 2386 pr_info("mds%d reconnect start\n", mds); 2387 2388 pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS); 2389 if (!pagelist) 2390 goto fail_nopagelist; 2391 ceph_pagelist_init(pagelist); 2392 2393 reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, GFP_NOFS); 2394 if (!reply) 2395 goto fail_nomsg; 2396 2397 mutex_lock(&session->s_mutex); 2398 session->s_state = CEPH_MDS_SESSION_RECONNECTING; 2399 session->s_seq = 0; 2400 2401 ceph_con_open(&session->s_con, 2402 ceph_mdsmap_get_addr(mdsc->mdsmap, mds)); 2403 2404 /* replay unsafe requests */ 2405 replay_unsafe_requests(mdsc, session); 2406 2407 down_read(&mdsc->snap_rwsem); 2408 2409 dout("session %p state %s\n", session, 2410 session_state_name(session->s_state)); 2411 2412 /* drop old cap expires; we're about to reestablish that state */ 2413 discard_cap_releases(mdsc, session); 2414 2415 /* traverse this session's caps */ 2416 err = ceph_pagelist_encode_32(pagelist, session->s_nr_caps); 2417 if (err) 2418 goto fail; 2419 2420 recon_state.pagelist = pagelist; 2421 recon_state.flock = session->s_con.peer_features & CEPH_FEATURE_FLOCK; 2422 err = iterate_session_caps(session, encode_caps_cb, &recon_state); 2423 if (err < 0) 2424 goto fail; 2425 2426 /* 2427 * snaprealms. we provide mds with the ino, seq (version), and 2428 * parent for all of our realms. If the mds has any newer info, 2429 * it will tell us. 2430 */ 2431 for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) { 2432 struct ceph_snap_realm *realm = 2433 rb_entry(p, struct ceph_snap_realm, node); 2434 struct ceph_mds_snaprealm_reconnect sr_rec; 2435 2436 dout(" adding snap realm %llx seq %lld parent %llx\n", 2437 realm->ino, realm->seq, realm->parent_ino); 2438 sr_rec.ino = cpu_to_le64(realm->ino); 2439 sr_rec.seq = cpu_to_le64(realm->seq); 2440 sr_rec.parent = cpu_to_le64(realm->parent_ino); 2441 err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec)); 2442 if (err) 2443 goto fail; 2444 } 2445 2446 reply->pagelist = pagelist; 2447 if (recon_state.flock) 2448 reply->hdr.version = cpu_to_le16(2); 2449 reply->hdr.data_len = cpu_to_le32(pagelist->length); 2450 reply->nr_pages = calc_pages_for(0, pagelist->length); 2451 ceph_con_send(&session->s_con, reply); 2452 2453 mutex_unlock(&session->s_mutex); 2454 2455 mutex_lock(&mdsc->mutex); 2456 __wake_requests(mdsc, &session->s_waiting); 2457 mutex_unlock(&mdsc->mutex); 2458 2459 up_read(&mdsc->snap_rwsem); 2460 return; 2461 2462 fail: 2463 ceph_msg_put(reply); 2464 up_read(&mdsc->snap_rwsem); 2465 mutex_unlock(&session->s_mutex); 2466 fail_nomsg: 2467 ceph_pagelist_release(pagelist); 2468 kfree(pagelist); 2469 fail_nopagelist: 2470 pr_err("error %d preparing reconnect for mds%d\n", err, mds); 2471 return; 2472 } 2473 2474 2475 /* 2476 * compare old and new mdsmaps, kicking requests 2477 * and closing out old connections as necessary 2478 * 2479 * called under mdsc->mutex. 
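 *
 * For each mds rank with an existing session we compare the old and
 * new maps:
 *  - if its address changed, close (or simply discard) the old session
 *    and kick any waiting requests
 *  - if it has entered the reconnect state, send_mds_reconnect()
 *  - if it has gone active, kick requests and flushing caps
 * We also open sessions to the export targets of any laggy mds.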
2480 */ 2481 static void check_new_map(struct ceph_mds_client *mdsc, 2482 struct ceph_mdsmap *newmap, 2483 struct ceph_mdsmap *oldmap) 2484 { 2485 int i; 2486 int oldstate, newstate; 2487 struct ceph_mds_session *s; 2488 2489 dout("check_new_map new %u old %u\n", 2490 newmap->m_epoch, oldmap->m_epoch); 2491 2492 for (i = 0; i < oldmap->m_max_mds && i < mdsc->max_sessions; i++) { 2493 if (mdsc->sessions[i] == NULL) 2494 continue; 2495 s = mdsc->sessions[i]; 2496 oldstate = ceph_mdsmap_get_state(oldmap, i); 2497 newstate = ceph_mdsmap_get_state(newmap, i); 2498 2499 dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n", 2500 i, ceph_mds_state_name(oldstate), 2501 ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "", 2502 ceph_mds_state_name(newstate), 2503 ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "", 2504 session_state_name(s->s_state)); 2505 2506 if (memcmp(ceph_mdsmap_get_addr(oldmap, i), 2507 ceph_mdsmap_get_addr(newmap, i), 2508 sizeof(struct ceph_entity_addr))) { 2509 if (s->s_state == CEPH_MDS_SESSION_OPENING) { 2510 /* the session never opened, just close it 2511 * out now */ 2512 __wake_requests(mdsc, &s->s_waiting); 2513 __unregister_session(mdsc, s); 2514 } else { 2515 /* just close it */ 2516 mutex_unlock(&mdsc->mutex); 2517 mutex_lock(&s->s_mutex); 2518 mutex_lock(&mdsc->mutex); 2519 ceph_con_close(&s->s_con); 2520 mutex_unlock(&s->s_mutex); 2521 s->s_state = CEPH_MDS_SESSION_RESTARTING; 2522 } 2523 2524 /* kick any requests waiting on the recovering mds */ 2525 kick_requests(mdsc, i); 2526 } else if (oldstate == newstate) { 2527 continue; /* nothing new with this mds */ 2528 } 2529 2530 /* 2531 * send reconnect? 2532 */ 2533 if (s->s_state == CEPH_MDS_SESSION_RESTARTING && 2534 newstate >= CEPH_MDS_STATE_RECONNECT) { 2535 mutex_unlock(&mdsc->mutex); 2536 send_mds_reconnect(mdsc, s); 2537 mutex_lock(&mdsc->mutex); 2538 } 2539 2540 /* 2541 * kick request on any mds that has gone active. 
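	 * This covers both a rank that was just created/started and one
	 * that has finished recovery; only the latter gets the
	 * "recovery completed" message.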
2542 */ 2543 if (oldstate < CEPH_MDS_STATE_ACTIVE && 2544 newstate >= CEPH_MDS_STATE_ACTIVE) { 2545 if (oldstate != CEPH_MDS_STATE_CREATING && 2546 oldstate != CEPH_MDS_STATE_STARTING) 2547 pr_info("mds%d recovery completed\n", s->s_mds); 2548 kick_requests(mdsc, i); 2549 ceph_kick_flushing_caps(mdsc, s); 2550 wake_up_session_caps(s, 1); 2551 } 2552 } 2553 2554 for (i = 0; i < newmap->m_max_mds && i < mdsc->max_sessions; i++) { 2555 s = mdsc->sessions[i]; 2556 if (!s) 2557 continue; 2558 if (!ceph_mdsmap_is_laggy(newmap, i)) 2559 continue; 2560 if (s->s_state == CEPH_MDS_SESSION_OPEN || 2561 s->s_state == CEPH_MDS_SESSION_HUNG || 2562 s->s_state == CEPH_MDS_SESSION_CLOSING) { 2563 dout(" connecting to export targets of laggy mds%d\n", 2564 i); 2565 __open_export_target_sessions(mdsc, s); 2566 } 2567 } 2568 } 2569 2570 2571 2572 /* 2573 * leases 2574 */ 2575 2576 /* 2577 * caller must hold session s_mutex, dentry->d_lock 2578 */ 2579 void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry) 2580 { 2581 struct ceph_dentry_info *di = ceph_dentry(dentry); 2582 2583 ceph_put_mds_session(di->lease_session); 2584 di->lease_session = NULL; 2585 } 2586 2587 static void handle_lease(struct ceph_mds_client *mdsc, 2588 struct ceph_mds_session *session, 2589 struct ceph_msg *msg) 2590 { 2591 struct super_block *sb = mdsc->client->sb; 2592 struct inode *inode; 2593 struct ceph_inode_info *ci; 2594 struct dentry *parent, *dentry; 2595 struct ceph_dentry_info *di; 2596 int mds = session->s_mds; 2597 struct ceph_mds_lease *h = msg->front.iov_base; 2598 u32 seq; 2599 struct ceph_vino vino; 2600 int mask; 2601 struct qstr dname; 2602 int release = 0; 2603 2604 dout("handle_lease from mds%d\n", mds); 2605 2606 /* decode */ 2607 if (msg->front.iov_len < sizeof(*h) + sizeof(u32)) 2608 goto bad; 2609 vino.ino = le64_to_cpu(h->ino); 2610 vino.snap = CEPH_NOSNAP; 2611 mask = le16_to_cpu(h->mask); 2612 seq = le32_to_cpu(h->seq); 2613 dname.name = (void *)h + sizeof(*h) + sizeof(u32); 2614 dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32); 2615 if (dname.len != get_unaligned_le32(h+1)) 2616 goto bad; 2617 2618 mutex_lock(&session->s_mutex); 2619 session->s_seq++; 2620 2621 /* lookup inode */ 2622 inode = ceph_find_inode(sb, vino); 2623 dout("handle_lease %s, mask %d, ino %llx %p %.*s\n", 2624 ceph_lease_op_name(h->action), mask, vino.ino, inode, 2625 dname.len, dname.name); 2626 if (inode == NULL) { 2627 dout("handle_lease no inode %llx\n", vino.ino); 2628 goto release; 2629 } 2630 ci = ceph_inode(inode); 2631 2632 /* dentry */ 2633 parent = d_find_alias(inode); 2634 if (!parent) { 2635 dout("no parent dentry on inode %p\n", inode); 2636 WARN_ON(1); 2637 goto release; /* hrm... 
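		 * (we expected at least one alias for an inode we were
		 * sent a dentry lease for)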
*/ 2638 } 2639 dname.hash = full_name_hash(dname.name, dname.len); 2640 dentry = d_lookup(parent, &dname); 2641 dput(parent); 2642 if (!dentry) 2643 goto release; 2644 2645 spin_lock(&dentry->d_lock); 2646 di = ceph_dentry(dentry); 2647 switch (h->action) { 2648 case CEPH_MDS_LEASE_REVOKE: 2649 if (di && di->lease_session == session) { 2650 if (ceph_seq_cmp(di->lease_seq, seq) > 0) 2651 h->seq = cpu_to_le32(di->lease_seq); 2652 __ceph_mdsc_drop_dentry_lease(dentry); 2653 } 2654 release = 1; 2655 break; 2656 2657 case CEPH_MDS_LEASE_RENEW: 2658 if (di && di->lease_session == session && 2659 di->lease_gen == session->s_cap_gen && 2660 di->lease_renew_from && 2661 di->lease_renew_after == 0) { 2662 unsigned long duration = 2663 le32_to_cpu(h->duration_ms) * HZ / 1000; 2664 2665 di->lease_seq = seq; 2666 dentry->d_time = di->lease_renew_from + duration; 2667 di->lease_renew_after = di->lease_renew_from + 2668 (duration >> 1); 2669 di->lease_renew_from = 0; 2670 } 2671 break; 2672 } 2673 spin_unlock(&dentry->d_lock); 2674 dput(dentry); 2675 2676 if (!release) 2677 goto out; 2678 2679 release: 2680 /* let's just reuse the same message */ 2681 h->action = CEPH_MDS_LEASE_REVOKE_ACK; 2682 ceph_msg_get(msg); 2683 ceph_con_send(&session->s_con, msg); 2684 2685 out: 2686 iput(inode); 2687 mutex_unlock(&session->s_mutex); 2688 return; 2689 2690 bad: 2691 pr_err("corrupt lease message\n"); 2692 ceph_msg_dump(msg); 2693 } 2694 2695 void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session, 2696 struct inode *inode, 2697 struct dentry *dentry, char action, 2698 u32 seq) 2699 { 2700 struct ceph_msg *msg; 2701 struct ceph_mds_lease *lease; 2702 int len = sizeof(*lease) + sizeof(u32); 2703 int dnamelen = 0; 2704 2705 dout("lease_send_msg inode %p dentry %p %s to mds%d\n", 2706 inode, dentry, ceph_lease_op_name(action), session->s_mds); 2707 dnamelen = dentry->d_name.len; 2708 len += dnamelen; 2709 2710 msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS); 2711 if (!msg) 2712 return; 2713 lease = msg->front.iov_base; 2714 lease->action = action; 2715 lease->mask = cpu_to_le16(1); 2716 lease->ino = cpu_to_le64(ceph_vino(inode).ino); 2717 lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap); 2718 lease->seq = cpu_to_le32(seq); 2719 put_unaligned_le32(dnamelen, lease + 1); 2720 memcpy((void *)(lease + 1) + 4, dentry->d_name.name, dnamelen); 2721 2722 /* 2723 * if this is a preemptive lease RELEASE, no need to 2724 * flush request stream, since the actual request will 2725 * soon follow. 2726 */ 2727 msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE); 2728 2729 ceph_con_send(&session->s_con, msg); 2730 } 2731 2732 /* 2733 * Preemptively release a lease we expect to invalidate anyway. 2734 * Pass @inode always, @dentry is optional. 2735 */ 2736 void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc, struct inode *inode, 2737 struct dentry *dentry, int mask) 2738 { 2739 struct ceph_dentry_info *di; 2740 struct ceph_mds_session *session; 2741 u32 seq; 2742 2743 BUG_ON(inode == NULL); 2744 BUG_ON(dentry == NULL); 2745 BUG_ON(mask == 0); 2746 2747 /* is dentry lease valid? 
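	 * it must come from a live session, match the session's current
	 * cap generation, and not have passed its ttl (dentry->d_time)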
*/ 2748 spin_lock(&dentry->d_lock); 2749 di = ceph_dentry(dentry); 2750 if (!di || !di->lease_session || 2751 di->lease_session->s_mds < 0 || 2752 di->lease_gen != di->lease_session->s_cap_gen || 2753 !time_before(jiffies, dentry->d_time)) { 2754 dout("lease_release inode %p dentry %p -- " 2755 "no lease on %d\n", 2756 inode, dentry, mask); 2757 spin_unlock(&dentry->d_lock); 2758 return; 2759 } 2760 2761 /* we do have a lease on this dentry; note mds and seq */ 2762 session = ceph_get_mds_session(di->lease_session); 2763 seq = di->lease_seq; 2764 __ceph_mdsc_drop_dentry_lease(dentry); 2765 spin_unlock(&dentry->d_lock); 2766 2767 dout("lease_release inode %p dentry %p mask %d to mds%d\n", 2768 inode, dentry, mask, session->s_mds); 2769 ceph_mdsc_lease_send_msg(session, inode, dentry, 2770 CEPH_MDS_LEASE_RELEASE, seq); 2771 ceph_put_mds_session(session); 2772 } 2773 2774 /* 2775 * drop all leases (and dentry refs) in preparation for umount 2776 */ 2777 static void drop_leases(struct ceph_mds_client *mdsc) 2778 { 2779 int i; 2780 2781 dout("drop_leases\n"); 2782 mutex_lock(&mdsc->mutex); 2783 for (i = 0; i < mdsc->max_sessions; i++) { 2784 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i); 2785 if (!s) 2786 continue; 2787 mutex_unlock(&mdsc->mutex); 2788 mutex_lock(&s->s_mutex); 2789 mutex_unlock(&s->s_mutex); 2790 ceph_put_mds_session(s); 2791 mutex_lock(&mdsc->mutex); 2792 } 2793 mutex_unlock(&mdsc->mutex); 2794 } 2795 2796 2797 2798 /* 2799 * delayed work -- periodically trim expired leases, renew caps with mds 2800 */ 2801 static void schedule_delayed(struct ceph_mds_client *mdsc) 2802 { 2803 int delay = 5; 2804 unsigned hz = round_jiffies_relative(HZ * delay); 2805 schedule_delayed_work(&mdsc->delayed_work, hz); 2806 } 2807 2808 static void delayed_work(struct work_struct *work) 2809 { 2810 int i; 2811 struct ceph_mds_client *mdsc = 2812 container_of(work, struct ceph_mds_client, delayed_work.work); 2813 int renew_interval; 2814 int renew_caps; 2815 2816 dout("mdsc delayed_work\n"); 2817 ceph_check_delayed_caps(mdsc); 2818 2819 mutex_lock(&mdsc->mutex); 2820 renew_interval = mdsc->mdsmap->m_session_timeout >> 2; 2821 renew_caps = time_after_eq(jiffies, HZ*renew_interval + 2822 mdsc->last_renew_caps); 2823 if (renew_caps) 2824 mdsc->last_renew_caps = jiffies; 2825 2826 for (i = 0; i < mdsc->max_sessions; i++) { 2827 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i); 2828 if (s == NULL) 2829 continue; 2830 if (s->s_state == CEPH_MDS_SESSION_CLOSING) { 2831 dout("resending session close request for mds%d\n", 2832 s->s_mds); 2833 request_close_session(mdsc, s); 2834 ceph_put_mds_session(s); 2835 continue; 2836 } 2837 if (s->s_ttl && time_after(jiffies, s->s_ttl)) { 2838 if (s->s_state == CEPH_MDS_SESSION_OPEN) { 2839 s->s_state = CEPH_MDS_SESSION_HUNG; 2840 pr_info("mds%d hung\n", s->s_mds); 2841 } 2842 } 2843 if (s->s_state < CEPH_MDS_SESSION_OPEN) { 2844 /* this mds is failed or recovering, just wait */ 2845 ceph_put_mds_session(s); 2846 continue; 2847 } 2848 mutex_unlock(&mdsc->mutex); 2849 2850 mutex_lock(&s->s_mutex); 2851 if (renew_caps) 2852 send_renew_caps(mdsc, s); 2853 else 2854 ceph_con_keepalive(&s->s_con); 2855 ceph_add_cap_releases(mdsc, s); 2856 if (s->s_state == CEPH_MDS_SESSION_OPEN || 2857 s->s_state == CEPH_MDS_SESSION_HUNG) 2858 ceph_send_cap_releases(mdsc, s); 2859 mutex_unlock(&s->s_mutex); 2860 ceph_put_mds_session(s); 2861 2862 mutex_lock(&mdsc->mutex); 2863 } 2864 mutex_unlock(&mdsc->mutex); 2865 2866 schedule_delayed(mdsc); 2867 } 2868 
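/*
 * mds client setup/teardown.
 *
 * A rough sketch of how the fs layer is expected to drive this file
 * over the life of a mount (the real call sites live in the mount and
 * umount paths elsewhere and may interleave other work):
 *
 *	ceph_mdsc_init(mdsc, client);		at mount time
 *	...					normal operation
 *	ceph_mdsc_sync(mdsc);			on sync of the fs
 *	...
 *	ceph_mdsc_pre_umount(mdsc);		umount begins
 *	ceph_mdsc_close_sessions(mdsc);		after the sb goes ro
 *	ceph_mdsc_stop(mdsc);			final teardown
 */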
2869 2870 int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client) 2871 { 2872 mdsc->client = client; 2873 mutex_init(&mdsc->mutex); 2874 mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS); 2875 if (mdsc->mdsmap == NULL) 2876 return -ENOMEM; 2877 2878 init_completion(&mdsc->safe_umount_waiters); 2879 init_completion(&mdsc->session_close_waiters); 2880 INIT_LIST_HEAD(&mdsc->waiting_for_map); 2881 mdsc->sessions = NULL; 2882 mdsc->max_sessions = 0; 2883 mdsc->stopping = 0; 2884 init_rwsem(&mdsc->snap_rwsem); 2885 mdsc->snap_realms = RB_ROOT; 2886 INIT_LIST_HEAD(&mdsc->snap_empty); 2887 spin_lock_init(&mdsc->snap_empty_lock); 2888 mdsc->last_tid = 0; 2889 mdsc->request_tree = RB_ROOT; 2890 INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work); 2891 mdsc->last_renew_caps = jiffies; 2892 INIT_LIST_HEAD(&mdsc->cap_delay_list); 2893 spin_lock_init(&mdsc->cap_delay_lock); 2894 INIT_LIST_HEAD(&mdsc->snap_flush_list); 2895 spin_lock_init(&mdsc->snap_flush_lock); 2896 mdsc->cap_flush_seq = 0; 2897 INIT_LIST_HEAD(&mdsc->cap_dirty); 2898 mdsc->num_cap_flushing = 0; 2899 spin_lock_init(&mdsc->cap_dirty_lock); 2900 init_waitqueue_head(&mdsc->cap_flushing_wq); 2901 spin_lock_init(&mdsc->dentry_lru_lock); 2902 INIT_LIST_HEAD(&mdsc->dentry_lru); 2903 2904 ceph_caps_init(mdsc); 2905 ceph_adjust_min_caps(mdsc, client->min_caps); 2906 2907 return 0; 2908 } 2909 2910 /* 2911 * Wait for safe replies on open mds requests. If we time out, drop 2912 * all requests from the tree to avoid dangling dentry refs. 2913 */ 2914 static void wait_requests(struct ceph_mds_client *mdsc) 2915 { 2916 struct ceph_mds_request *req; 2917 struct ceph_client *client = mdsc->client; 2918 2919 mutex_lock(&mdsc->mutex); 2920 if (__get_oldest_req(mdsc)) { 2921 mutex_unlock(&mdsc->mutex); 2922 2923 dout("wait_requests waiting for requests\n"); 2924 wait_for_completion_timeout(&mdsc->safe_umount_waiters, 2925 client->mount_args->mount_timeout * HZ); 2926 2927 /* tear down remaining requests */ 2928 mutex_lock(&mdsc->mutex); 2929 while ((req = __get_oldest_req(mdsc))) { 2930 dout("wait_requests timed out on tid %llu\n", 2931 req->r_tid); 2932 __unregister_request(mdsc, req); 2933 } 2934 } 2935 mutex_unlock(&mdsc->mutex); 2936 dout("wait_requests done\n"); 2937 } 2938 2939 /* 2940 * called before mount is ro, and before dentries are torn down. 2941 * (hmm, does this still race with new lookups?) 2942 */ 2943 void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc) 2944 { 2945 dout("pre_umount\n"); 2946 mdsc->stopping = 1; 2947 2948 drop_leases(mdsc); 2949 ceph_flush_dirty_caps(mdsc); 2950 wait_requests(mdsc); 2951 2952 /* 2953 * wait for reply handlers to drop their request refs and 2954 * their inode/dcache refs 2955 */ 2956 ceph_msgr_flush(); 2957 } 2958 2959 /* 2960 * wait for all write mds requests to flush. 
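 *
 * Walk the request tree in tid order and wait on r_safe_completion for
 * each write op with tid <= want_tid.  We grab a reference to the next
 * request before dropping mdsc->mutex, and restart the walk if that
 * request is unregistered while we sleep.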
2961 */ 2962 static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid) 2963 { 2964 struct ceph_mds_request *req = NULL, *nextreq; 2965 struct rb_node *n; 2966 2967 mutex_lock(&mdsc->mutex); 2968 dout("wait_unsafe_requests want %lld\n", want_tid); 2969 restart: 2970 req = __get_oldest_req(mdsc); 2971 while (req && req->r_tid <= want_tid) { 2972 /* find next request */ 2973 n = rb_next(&req->r_node); 2974 if (n) 2975 nextreq = rb_entry(n, struct ceph_mds_request, r_node); 2976 else 2977 nextreq = NULL; 2978 if ((req->r_op & CEPH_MDS_OP_WRITE)) { 2979 /* write op */ 2980 ceph_mdsc_get_request(req); 2981 if (nextreq) 2982 ceph_mdsc_get_request(nextreq); 2983 mutex_unlock(&mdsc->mutex); 2984 dout("wait_unsafe_requests wait on %llu (want %llu)\n", 2985 req->r_tid, want_tid); 2986 wait_for_completion(&req->r_safe_completion); 2987 mutex_lock(&mdsc->mutex); 2988 ceph_mdsc_put_request(req); 2989 if (!nextreq) 2990 break; /* next dne before, so we're done! */ 2991 if (RB_EMPTY_NODE(&nextreq->r_node)) { 2992 /* next request was removed from tree */ 2993 ceph_mdsc_put_request(nextreq); 2994 goto restart; 2995 } 2996 ceph_mdsc_put_request(nextreq); /* won't go away */ 2997 } 2998 req = nextreq; 2999 } 3000 mutex_unlock(&mdsc->mutex); 3001 dout("wait_unsafe_requests done\n"); 3002 } 3003 3004 void ceph_mdsc_sync(struct ceph_mds_client *mdsc) 3005 { 3006 u64 want_tid, want_flush; 3007 3008 if (mdsc->client->mount_state == CEPH_MOUNT_SHUTDOWN) 3009 return; 3010 3011 dout("sync\n"); 3012 mutex_lock(&mdsc->mutex); 3013 want_tid = mdsc->last_tid; 3014 want_flush = mdsc->cap_flush_seq; 3015 mutex_unlock(&mdsc->mutex); 3016 dout("sync want tid %lld flush_seq %lld\n", want_tid, want_flush); 3017 3018 ceph_flush_dirty_caps(mdsc); 3019 3020 wait_unsafe_requests(mdsc, want_tid); 3021 wait_event(mdsc->cap_flushing_wq, check_cap_flush(mdsc, want_flush)); 3022 } 3023 3024 3025 /* 3026 * called after sb is ro. 
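 *
 * Request that each open session be closed and wait (bounded by the
 * mount timeout) for the MDSs to acknowledge; any sessions still
 * present afterwards are unregistered and their caps dropped.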
3027 */ 3028 void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc) 3029 { 3030 struct ceph_mds_session *session; 3031 int i; 3032 int n; 3033 struct ceph_client *client = mdsc->client; 3034 unsigned long started, timeout = client->mount_args->mount_timeout * HZ; 3035 3036 dout("close_sessions\n"); 3037 3038 mutex_lock(&mdsc->mutex); 3039 3040 /* close sessions */ 3041 started = jiffies; 3042 while (time_before(jiffies, started + timeout)) { 3043 dout("closing sessions\n"); 3044 n = 0; 3045 for (i = 0; i < mdsc->max_sessions; i++) { 3046 session = __ceph_lookup_mds_session(mdsc, i); 3047 if (!session) 3048 continue; 3049 mutex_unlock(&mdsc->mutex); 3050 mutex_lock(&session->s_mutex); 3051 __close_session(mdsc, session); 3052 mutex_unlock(&session->s_mutex); 3053 ceph_put_mds_session(session); 3054 mutex_lock(&mdsc->mutex); 3055 n++; 3056 } 3057 if (n == 0) 3058 break; 3059 3060 if (client->mount_state == CEPH_MOUNT_SHUTDOWN) 3061 break; 3062 3063 dout("waiting for sessions to close\n"); 3064 mutex_unlock(&mdsc->mutex); 3065 wait_for_completion_timeout(&mdsc->session_close_waiters, 3066 timeout); 3067 mutex_lock(&mdsc->mutex); 3068 } 3069 3070 /* tear down remaining sessions */ 3071 for (i = 0; i < mdsc->max_sessions; i++) { 3072 if (mdsc->sessions[i]) { 3073 session = get_session(mdsc->sessions[i]); 3074 __unregister_session(mdsc, session); 3075 mutex_unlock(&mdsc->mutex); 3076 mutex_lock(&session->s_mutex); 3077 remove_session_caps(session); 3078 mutex_unlock(&session->s_mutex); 3079 ceph_put_mds_session(session); 3080 mutex_lock(&mdsc->mutex); 3081 } 3082 } 3083 3084 WARN_ON(!list_empty(&mdsc->cap_delay_list)); 3085 3086 mutex_unlock(&mdsc->mutex); 3087 3088 ceph_cleanup_empty_realms(mdsc); 3089 3090 cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */ 3091 3092 dout("stopped\n"); 3093 } 3094 3095 void ceph_mdsc_stop(struct ceph_mds_client *mdsc) 3096 { 3097 dout("stop\n"); 3098 cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */ 3099 if (mdsc->mdsmap) 3100 ceph_mdsmap_destroy(mdsc->mdsmap); 3101 kfree(mdsc->sessions); 3102 ceph_caps_finalize(mdsc); 3103 } 3104 3105 3106 /* 3107 * handle mds map update. 3108 */ 3109 void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg) 3110 { 3111 u32 epoch; 3112 u32 maplen; 3113 void *p = msg->front.iov_base; 3114 void *end = p + msg->front.iov_len; 3115 struct ceph_mdsmap *newmap, *oldmap; 3116 struct ceph_fsid fsid; 3117 int err = -EINVAL; 3118 3119 ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad); 3120 ceph_decode_copy(&p, &fsid, sizeof(fsid)); 3121 if (ceph_check_fsid(mdsc->client, &fsid) < 0) 3122 return; 3123 epoch = ceph_decode_32(&p); 3124 maplen = ceph_decode_32(&p); 3125 dout("handle_map epoch %u len %d\n", epoch, (int)maplen); 3126 3127 /* do we need it? 
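	 * only decode and install the new map if its epoch is newer
	 * than the one we already have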
*/ 3128 ceph_monc_got_mdsmap(&mdsc->client->monc, epoch); 3129 mutex_lock(&mdsc->mutex); 3130 if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) { 3131 dout("handle_map epoch %u <= our %u\n", 3132 epoch, mdsc->mdsmap->m_epoch); 3133 mutex_unlock(&mdsc->mutex); 3134 return; 3135 } 3136 3137 newmap = ceph_mdsmap_decode(&p, end); 3138 if (IS_ERR(newmap)) { 3139 err = PTR_ERR(newmap); 3140 goto bad_unlock; 3141 } 3142 3143 /* swap into place */ 3144 if (mdsc->mdsmap) { 3145 oldmap = mdsc->mdsmap; 3146 mdsc->mdsmap = newmap; 3147 check_new_map(mdsc, newmap, oldmap); 3148 ceph_mdsmap_destroy(oldmap); 3149 } else { 3150 mdsc->mdsmap = newmap; /* first mds map */ 3151 } 3152 mdsc->client->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size; 3153 3154 __wake_requests(mdsc, &mdsc->waiting_for_map); 3155 3156 mutex_unlock(&mdsc->mutex); 3157 schedule_delayed(mdsc); 3158 return; 3159 3160 bad_unlock: 3161 mutex_unlock(&mdsc->mutex); 3162 bad: 3163 pr_err("error decoding mdsmap %d\n", err); 3164 return; 3165 } 3166 3167 static struct ceph_connection *con_get(struct ceph_connection *con) 3168 { 3169 struct ceph_mds_session *s = con->private; 3170 3171 if (get_session(s)) { 3172 dout("mdsc con_get %p ok (%d)\n", s, atomic_read(&s->s_ref)); 3173 return con; 3174 } 3175 dout("mdsc con_get %p FAIL\n", s); 3176 return NULL; 3177 } 3178 3179 static void con_put(struct ceph_connection *con) 3180 { 3181 struct ceph_mds_session *s = con->private; 3182 3183 ceph_put_mds_session(s); 3184 dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref)); 3185 } 3186 3187 /* 3188 * if the client is unresponsive for long enough, the mds will kill 3189 * the session entirely. 3190 */ 3191 static void peer_reset(struct ceph_connection *con) 3192 { 3193 struct ceph_mds_session *s = con->private; 3194 struct ceph_mds_client *mdsc = s->s_mdsc; 3195 3196 pr_warning("mds%d closed our session\n", s->s_mds); 3197 send_mds_reconnect(mdsc, s); 3198 } 3199 3200 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg) 3201 { 3202 struct ceph_mds_session *s = con->private; 3203 struct ceph_mds_client *mdsc = s->s_mdsc; 3204 int type = le16_to_cpu(msg->hdr.type); 3205 3206 mutex_lock(&mdsc->mutex); 3207 if (__verify_registered_session(mdsc, s) < 0) { 3208 mutex_unlock(&mdsc->mutex); 3209 goto out; 3210 } 3211 mutex_unlock(&mdsc->mutex); 3212 3213 switch (type) { 3214 case CEPH_MSG_MDS_MAP: 3215 ceph_mdsc_handle_map(mdsc, msg); 3216 break; 3217 case CEPH_MSG_CLIENT_SESSION: 3218 handle_session(s, msg); 3219 break; 3220 case CEPH_MSG_CLIENT_REPLY: 3221 handle_reply(s, msg); 3222 break; 3223 case CEPH_MSG_CLIENT_REQUEST_FORWARD: 3224 handle_forward(mdsc, s, msg); 3225 break; 3226 case CEPH_MSG_CLIENT_CAPS: 3227 ceph_handle_caps(s, msg); 3228 break; 3229 case CEPH_MSG_CLIENT_SNAP: 3230 ceph_handle_snap(mdsc, s, msg); 3231 break; 3232 case CEPH_MSG_CLIENT_LEASE: 3233 handle_lease(mdsc, s, msg); 3234 break; 3235 3236 default: 3237 pr_err("received unknown message type %d %s\n", type, 3238 ceph_msg_type_name(type)); 3239 } 3240 out: 3241 ceph_msg_put(msg); 3242 } 3243 3244 /* 3245 * authentication 3246 */ 3247 static int get_authorizer(struct ceph_connection *con, 3248 void **buf, int *len, int *proto, 3249 void **reply_buf, int *reply_len, int force_new) 3250 { 3251 struct ceph_mds_session *s = con->private; 3252 struct ceph_mds_client *mdsc = s->s_mdsc; 3253 struct ceph_auth_client *ac = mdsc->client->monc.auth; 3254 int ret = 0; 3255 3256 if (force_new && s->s_authorizer) { 3257 ac->ops->destroy_authorizer(ac, s->s_authorizer); 3258 
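		/* clearing the pointer forces a fresh authorizer to be
		 * built below */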
s->s_authorizer = NULL; 3259 } 3260 if (s->s_authorizer == NULL) { 3261 if (ac->ops->create_authorizer) { 3262 ret = ac->ops->create_authorizer( 3263 ac, CEPH_ENTITY_TYPE_MDS, 3264 &s->s_authorizer, 3265 &s->s_authorizer_buf, 3266 &s->s_authorizer_buf_len, 3267 &s->s_authorizer_reply_buf, 3268 &s->s_authorizer_reply_buf_len); 3269 if (ret) 3270 return ret; 3271 } 3272 } 3273 3274 *proto = ac->protocol; 3275 *buf = s->s_authorizer_buf; 3276 *len = s->s_authorizer_buf_len; 3277 *reply_buf = s->s_authorizer_reply_buf; 3278 *reply_len = s->s_authorizer_reply_buf_len; 3279 return 0; 3280 } 3281 3282 3283 static int verify_authorizer_reply(struct ceph_connection *con, int len) 3284 { 3285 struct ceph_mds_session *s = con->private; 3286 struct ceph_mds_client *mdsc = s->s_mdsc; 3287 struct ceph_auth_client *ac = mdsc->client->monc.auth; 3288 3289 return ac->ops->verify_authorizer_reply(ac, s->s_authorizer, len); 3290 } 3291 3292 static int invalidate_authorizer(struct ceph_connection *con) 3293 { 3294 struct ceph_mds_session *s = con->private; 3295 struct ceph_mds_client *mdsc = s->s_mdsc; 3296 struct ceph_auth_client *ac = mdsc->client->monc.auth; 3297 3298 if (ac->ops->invalidate_authorizer) 3299 ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS); 3300 3301 return ceph_monc_validate_auth(&mdsc->client->monc); 3302 } 3303 3304 static const struct ceph_connection_operations mds_con_ops = { 3305 .get = con_get, 3306 .put = con_put, 3307 .dispatch = dispatch, 3308 .get_authorizer = get_authorizer, 3309 .verify_authorizer_reply = verify_authorizer_reply, 3310 .invalidate_authorizer = invalidate_authorizer, 3311 .peer_reset = peer_reset, 3312 }; 3313 3314 3315 3316 3317 /* eof */ 3318