#include <linux/ceph/ceph_debug.h>

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/writeback.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include <linux/ceph/decode.h>
#include <linux/ceph/messenger.h>

/*
 * Capability management
 *
 * The Ceph metadata servers control client access to inode metadata
 * and file data by issuing capabilities, granting clients permission
 * to read and/or write both inode fields and file data to OSDs
 * (storage nodes).  Each capability consists of a set of bits
 * indicating which operations are allowed.
 *
 * If the client holds a *_SHARED cap, the client has a coherent value
 * that can be safely read from the cached inode.
 *
 * In the case of *_EXCL (exclusive) or FILE_WR capabilities, the
 * client is allowed to change inode attributes (e.g., file size,
 * mtime), note its dirty state in the ceph_cap, and asynchronously
 * flush that metadata change to the MDS.
 *
 * In the event of a conflicting operation (perhaps by another
 * client), the MDS will revoke the conflicting client capabilities.
 *
 * In order for a client to cache an inode, it must hold a capability
 * from at least one MDS server.  When inodes are released, release
 * notifications are batched and periodically sent en masse to the MDS
 * cluster to release server state.
 */

static u64 __get_oldest_flush_tid(struct ceph_mds_client *mdsc);
static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *session,
				 struct ceph_inode_info *ci,
				 u64 oldest_flush_tid);

/*
 * Generate readable cap strings for debugging output.
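 * Strings live in a small ring of static buffers, so a returned
 * pointer is only valid until MAX_CAP_STR later calls.  E.g.
 * CEPH_CAP_FILE_SHARED|CEPH_CAP_FILE_RD renders as "Fsr".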
 */
#define MAX_CAP_STR 20
static char cap_str[MAX_CAP_STR][40];
static DEFINE_SPINLOCK(cap_str_lock);
static int last_cap_str;

static char *gcap_string(char *s, int c)
{
	if (c & CEPH_CAP_GSHARED)
		*s++ = 's';
	if (c & CEPH_CAP_GEXCL)
		*s++ = 'x';
	if (c & CEPH_CAP_GCACHE)
		*s++ = 'c';
	if (c & CEPH_CAP_GRD)
		*s++ = 'r';
	if (c & CEPH_CAP_GWR)
		*s++ = 'w';
	if (c & CEPH_CAP_GBUFFER)
		*s++ = 'b';
	if (c & CEPH_CAP_GLAZYIO)
		*s++ = 'l';
	return s;
}

const char *ceph_cap_string(int caps)
{
	int i;
	char *s;
	int c;

	spin_lock(&cap_str_lock);
	i = last_cap_str++;
	if (last_cap_str == MAX_CAP_STR)
		last_cap_str = 0;
	spin_unlock(&cap_str_lock);

	s = cap_str[i];

	if (caps & CEPH_CAP_PIN)
		*s++ = 'p';

	c = (caps >> CEPH_CAP_SAUTH) & 3;
	if (c) {
		*s++ = 'A';
		s = gcap_string(s, c);
	}

	c = (caps >> CEPH_CAP_SLINK) & 3;
	if (c) {
		*s++ = 'L';
		s = gcap_string(s, c);
	}

	c = (caps >> CEPH_CAP_SXATTR) & 3;
	if (c) {
		*s++ = 'X';
		s = gcap_string(s, c);
	}

	c = caps >> CEPH_CAP_SFILE;
	if (c) {
		*s++ = 'F';
		s = gcap_string(s, c);
	}

	if (s == cap_str[i])
		*s++ = '-';
	*s = 0;
	return cap_str[i];
}

void ceph_caps_init(struct ceph_mds_client *mdsc)
{
	INIT_LIST_HEAD(&mdsc->caps_list);
	spin_lock_init(&mdsc->caps_list_lock);
}

void ceph_caps_finalize(struct ceph_mds_client *mdsc)
{
	struct ceph_cap *cap;

	spin_lock(&mdsc->caps_list_lock);
	while (!list_empty(&mdsc->caps_list)) {
		cap = list_first_entry(&mdsc->caps_list,
				       struct ceph_cap, caps_item);
		list_del(&cap->caps_item);
		kmem_cache_free(ceph_cap_cachep, cap);
	}
	mdsc->caps_total_count = 0;
	mdsc->caps_avail_count = 0;
	mdsc->caps_use_count = 0;
	mdsc->caps_reserve_count = 0;
	mdsc->caps_min_count = 0;
	spin_unlock(&mdsc->caps_list_lock);
}

void ceph_adjust_min_caps(struct ceph_mds_client *mdsc, int delta)
{
	spin_lock(&mdsc->caps_list_lock);
	mdsc->caps_min_count += delta;
	BUG_ON(mdsc->caps_min_count < 0);
	spin_unlock(&mdsc->caps_list_lock);
}

void ceph_reserve_caps(struct ceph_mds_client *mdsc,
		       struct ceph_cap_reservation *ctx, int need)
{
	int i;
	struct ceph_cap *cap;
	int have;
	int alloc = 0;
	LIST_HEAD(newcaps);

	dout("reserve caps ctx=%p need=%d\n", ctx, need);

	/* first reserve any caps that are already allocated */
	spin_lock(&mdsc->caps_list_lock);
	if (mdsc->caps_avail_count >= need)
		have = need;
	else
		have = mdsc->caps_avail_count;
	mdsc->caps_avail_count -= have;
	mdsc->caps_reserve_count += have;
	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
					 mdsc->caps_reserve_count +
					 mdsc->caps_avail_count);
	spin_unlock(&mdsc->caps_list_lock);

	for (i = have; i < need; i++) {
		cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
		if (!cap)
			break;
		list_add(&cap->caps_item, &newcaps);
		alloc++;
	}
	/* we didn't manage to reserve as much as we needed */
	if (have + alloc != need)
		pr_warn("reserve caps ctx=%p ENOMEM need=%d got=%d\n",
			ctx, need, have + alloc);

	spin_lock(&mdsc->caps_list_lock);
	mdsc->caps_total_count += alloc;
	mdsc->caps_reserve_count += alloc;
	list_splice(&newcaps, &mdsc->caps_list);

	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
					 mdsc->caps_reserve_count +
					 mdsc->caps_avail_count);
	spin_unlock(&mdsc->caps_list_lock);

	ctx->count = need;
	dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n",
	     ctx, mdsc->caps_total_count, mdsc->caps_use_count,
	     mdsc->caps_reserve_count, mdsc->caps_avail_count);
}

int ceph_unreserve_caps(struct ceph_mds_client *mdsc,
			struct ceph_cap_reservation *ctx)
{
	dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count);
	if (ctx->count) {
		spin_lock(&mdsc->caps_list_lock);
		BUG_ON(mdsc->caps_reserve_count < ctx->count);
		mdsc->caps_reserve_count -= ctx->count;
		mdsc->caps_avail_count += ctx->count;
		ctx->count = 0;
		dout("unreserve caps %d = %d used + %d resv + %d avail\n",
		     mdsc->caps_total_count, mdsc->caps_use_count,
		     mdsc->caps_reserve_count, mdsc->caps_avail_count);
		BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
						 mdsc->caps_reserve_count +
						 mdsc->caps_avail_count);
		spin_unlock(&mdsc->caps_list_lock);
	}
	return 0;
}

struct ceph_cap *ceph_get_cap(struct ceph_mds_client *mdsc,
			      struct ceph_cap_reservation *ctx)
{
	struct ceph_cap *cap = NULL;

	/* temporary, until we do something about cap import/export */
	if (!ctx) {
		cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
		if (cap) {
			spin_lock(&mdsc->caps_list_lock);
			mdsc->caps_use_count++;
			mdsc->caps_total_count++;
			spin_unlock(&mdsc->caps_list_lock);
		}
		return cap;
	}

	spin_lock(&mdsc->caps_list_lock);
	dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n",
	     ctx, ctx->count, mdsc->caps_total_count, mdsc->caps_use_count,
	     mdsc->caps_reserve_count, mdsc->caps_avail_count);
	BUG_ON(!ctx->count);
	BUG_ON(ctx->count > mdsc->caps_reserve_count);
	BUG_ON(list_empty(&mdsc->caps_list));

	ctx->count--;
	mdsc->caps_reserve_count--;
	mdsc->caps_use_count++;

	cap = list_first_entry(&mdsc->caps_list, struct ceph_cap, caps_item);
	list_del(&cap->caps_item);

	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
	       mdsc->caps_reserve_count + mdsc->caps_avail_count);
	spin_unlock(&mdsc->caps_list_lock);
	return cap;
}

void ceph_put_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap)
{
	spin_lock(&mdsc->caps_list_lock);
	dout("put_cap %p %d = %d used + %d resv + %d avail\n",
	     cap, mdsc->caps_total_count, mdsc->caps_use_count,
	     mdsc->caps_reserve_count, mdsc->caps_avail_count);
	mdsc->caps_use_count--;
	/*
	 * Keep some preallocated caps around (ceph_min_count), to
	 * avoid lots of free/alloc churn.
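	 * A cap goes back to the slab allocator only once the available
	 * pool already covers all outstanding reservations plus the
	 * configured minimum.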
	 */
	if (mdsc->caps_avail_count >= mdsc->caps_reserve_count +
				      mdsc->caps_min_count) {
		mdsc->caps_total_count--;
		kmem_cache_free(ceph_cap_cachep, cap);
	} else {
		mdsc->caps_avail_count++;
		list_add(&cap->caps_item, &mdsc->caps_list);
	}

	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
	       mdsc->caps_reserve_count + mdsc->caps_avail_count);
	spin_unlock(&mdsc->caps_list_lock);
}

void ceph_reservation_status(struct ceph_fs_client *fsc,
			     int *total, int *avail, int *used, int *reserved,
			     int *min)
{
	struct ceph_mds_client *mdsc = fsc->mdsc;

	if (total)
		*total = mdsc->caps_total_count;
	if (avail)
		*avail = mdsc->caps_avail_count;
	if (used)
		*used = mdsc->caps_use_count;
	if (reserved)
		*reserved = mdsc->caps_reserve_count;
	if (min)
		*min = mdsc->caps_min_count;
}

/*
 * Find ceph_cap for given mds, if any.
 *
 * Called with i_ceph_lock held.
 */
static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
{
	struct ceph_cap *cap;
	struct rb_node *n = ci->i_caps.rb_node;

	while (n) {
		cap = rb_entry(n, struct ceph_cap, ci_node);
		if (mds < cap->mds)
			n = n->rb_left;
		else if (mds > cap->mds)
			n = n->rb_right;
		else
			return cap;
	}
	return NULL;
}

struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, int mds)
{
	struct ceph_cap *cap;

	spin_lock(&ci->i_ceph_lock);
	cap = __get_cap_for_mds(ci, mds);
	spin_unlock(&ci->i_ceph_lock);
	return cap;
}

/*
 * Return id of any MDS with a cap, preferably FILE_WR|BUFFER|EXCL, else -1.
 */
static int __ceph_get_cap_mds(struct ceph_inode_info *ci)
{
	struct ceph_cap *cap;
	int mds = -1;
	struct rb_node *p;

	/* prefer mds with WR|BUFFER|EXCL caps */
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		mds = cap->mds;
		if (cap->issued & (CEPH_CAP_FILE_WR |
				   CEPH_CAP_FILE_BUFFER |
				   CEPH_CAP_FILE_EXCL))
			break;
	}
	return mds;
}

int ceph_get_cap_mds(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int mds;

	spin_lock(&ci->i_ceph_lock);
	mds = __ceph_get_cap_mds(ci);
	spin_unlock(&ci->i_ceph_lock);
	return mds;
}

/*
 * Called under i_ceph_lock.
 */
static void __insert_cap_node(struct ceph_inode_info *ci,
			      struct ceph_cap *new)
{
	struct rb_node **p = &ci->i_caps.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_cap *cap = NULL;

	while (*p) {
		parent = *p;
		cap = rb_entry(parent, struct ceph_cap, ci_node);
		if (new->mds < cap->mds)
			p = &(*p)->rb_left;
		else if (new->mds > cap->mds)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->ci_node, parent, p);
	rb_insert_color(&new->ci_node, &ci->i_caps);
}

/*
 * (Re)set cap hold timeouts, which control the delayed release
 * of unused caps back to the MDS.  Should be called on cap use.
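 * Two timers are armed from the caps_wanted_delay_min/max mount
 * options: i_hold_caps_min (earliest we may drop unwanted caps) and
 * i_hold_caps_max (when the delayed work must revisit this inode).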
 */
static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
			       struct ceph_inode_info *ci)
{
	struct ceph_mount_options *ma = mdsc->fsc->mount_options;

	ci->i_hold_caps_min = round_jiffies(jiffies +
					    ma->caps_wanted_delay_min * HZ);
	ci->i_hold_caps_max = round_jiffies(jiffies +
					    ma->caps_wanted_delay_max * HZ);
	dout("__cap_set_timeouts %p min %lu max %lu\n", &ci->vfs_inode,
	     ci->i_hold_caps_min - jiffies, ci->i_hold_caps_max - jiffies);
}

/*
 * (Re)queue cap at the end of the delayed cap release list.
 *
 * If I_FLUSH is set, leave the inode at the front of the list.
 *
 * Caller holds i_ceph_lock
 *    -> we take mdsc->cap_delay_lock
 */
static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
				struct ceph_inode_info *ci)
{
	__cap_set_timeouts(mdsc, ci);
	dout("__cap_delay_requeue %p flags %d at %lu\n", &ci->vfs_inode,
	     ci->i_ceph_flags, ci->i_hold_caps_max);
	if (!mdsc->stopping) {
		spin_lock(&mdsc->cap_delay_lock);
		if (!list_empty(&ci->i_cap_delay_list)) {
			if (ci->i_ceph_flags & CEPH_I_FLUSH)
				goto no_change;
			list_del_init(&ci->i_cap_delay_list);
		}
		list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
no_change:
		spin_unlock(&mdsc->cap_delay_lock);
	}
}

/*
 * Queue an inode for immediate writeback.  Mark inode with I_FLUSH,
 * indicating we should send a cap message to flush dirty metadata
 * asap, and move to the front of the delayed cap list.
 */
static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
				      struct ceph_inode_info *ci)
{
	dout("__cap_delay_requeue_front %p\n", &ci->vfs_inode);
	spin_lock(&mdsc->cap_delay_lock);
	ci->i_ceph_flags |= CEPH_I_FLUSH;
	if (!list_empty(&ci->i_cap_delay_list))
		list_del_init(&ci->i_cap_delay_list);
	list_add(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
	spin_unlock(&mdsc->cap_delay_lock);
}

/*
 * Cancel delayed work on cap.
 *
 * Caller must hold i_ceph_lock.
 */
static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
			       struct ceph_inode_info *ci)
{
	dout("__cap_delay_cancel %p\n", &ci->vfs_inode);
	if (list_empty(&ci->i_cap_delay_list))
		return;
	spin_lock(&mdsc->cap_delay_lock);
	list_del_init(&ci->i_cap_delay_list);
	spin_unlock(&mdsc->cap_delay_lock);
}

/*
 * Common issue checks for add_cap, handle_cap_grant.
 */
static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
			      unsigned issued)
{
	unsigned had = __ceph_caps_issued(ci, NULL);

	/*
	 * Each time we receive FILE_CACHE anew, we increment
	 * i_rdcache_gen.
	 */
	if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
	    (had & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0) {
		ci->i_rdcache_gen++;
	}

	/*
	 * If we are newly issued FILE_SHARED, mark dir not complete; we
	 * don't know what happened to this directory while we didn't
	 * have the cap.
	 */
	if ((issued & CEPH_CAP_FILE_SHARED) &&
	    (had & CEPH_CAP_FILE_SHARED) == 0) {
		ci->i_shared_gen++;
		if (S_ISDIR(ci->vfs_inode.i_mode)) {
			dout(" marking %p NOT complete\n", &ci->vfs_inode);
			__ceph_dir_clear_complete(ci);
		}
	}
}

/*
 * Add a capability under the given MDS session.
 *
 * Caller should hold session snap_rwsem (read) and s_mutex.
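 * If a cap already exists for this MDS it is updated in place;
 * otherwise the preallocated cap in @new_cap is consumed.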
 *
 * @fmode is the open file mode, if we are opening a file, otherwise
 * it is < 0.  (This is so we can atomically add the cap and add an
 * open file reference to it.)
 */
void ceph_add_cap(struct inode *inode,
		  struct ceph_mds_session *session, u64 cap_id,
		  int fmode, unsigned issued, unsigned wanted,
		  unsigned seq, unsigned mseq, u64 realmino, int flags,
		  struct ceph_cap **new_cap)
{
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap *cap;
	int mds = session->s_mds;
	int actual_wanted;

	dout("add_cap %p mds%d cap %llx %s seq %d\n", inode,
	     session->s_mds, cap_id, ceph_cap_string(issued), seq);

	/*
	 * If we are opening the file, include file mode wanted bits
	 * in wanted.
	 */
	if (fmode >= 0)
		wanted |= ceph_caps_for_mode(fmode);

	cap = __get_cap_for_mds(ci, mds);
	if (!cap) {
		cap = *new_cap;
		*new_cap = NULL;

		cap->issued = 0;
		cap->implemented = 0;
		cap->mds = mds;
		cap->mds_wanted = 0;
		cap->mseq = 0;

		cap->ci = ci;
		__insert_cap_node(ci, cap);

		/* add to session cap list */
		cap->session = session;
		spin_lock(&session->s_cap_lock);
		list_add_tail(&cap->session_caps, &session->s_caps);
		session->s_nr_caps++;
		spin_unlock(&session->s_cap_lock);
	} else {
		/*
		 * The auth MDS of the inode changed.  We received the cap
		 * export message, but still haven't received the cap import
		 * message.  handle_cap_export() updated the new auth MDS'
		 * cap.
		 *
		 * "ceph_seq_cmp(seq, cap->seq) <= 0" means we are processing
		 * a message that was sent before the cap import message.  So
		 * don't remove caps.
		 */
		if (ceph_seq_cmp(seq, cap->seq) <= 0) {
			WARN_ON(cap != ci->i_auth_cap);
			WARN_ON(cap->cap_id != cap_id);
			seq = cap->seq;
			mseq = cap->mseq;
			issued |= cap->issued;
			flags |= CEPH_CAP_FLAG_AUTH;
		}
	}

	if (!ci->i_snap_realm) {
		/*
		 * add this inode to the appropriate snap realm
		 */
		struct ceph_snap_realm *realm = ceph_lookup_snap_realm(mdsc,
								       realmino);
		if (realm) {
			spin_lock(&realm->inodes_with_caps_lock);
			ci->i_snap_realm = realm;
			list_add(&ci->i_snap_realm_item,
				 &realm->inodes_with_caps);
			spin_unlock(&realm->inodes_with_caps_lock);
		} else {
			pr_err("ceph_add_cap: couldn't find snap realm %llx\n",
			       realmino);
			WARN_ON(!realm);
		}
	}

	__check_cap_issue(ci, cap, issued);

	/*
	 * If we are issued caps we don't want, or the mds' wanted
	 * value appears to be off, queue a check so we'll release
	 * later and/or update the mds wanted value.
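	 * (__cap_delay_requeue() arms the hold timers, so the check
	 * runs from the delayed work rather than immediately.)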
	 */
	actual_wanted = __ceph_caps_wanted(ci);
	if ((wanted & ~actual_wanted) ||
	    (issued & ~actual_wanted & CEPH_CAP_ANY_WR)) {
		dout(" issued %s, mds wanted %s, actual %s, queueing\n",
		     ceph_cap_string(issued), ceph_cap_string(wanted),
		     ceph_cap_string(actual_wanted));
		__cap_delay_requeue(mdsc, ci);
	}

	if (flags & CEPH_CAP_FLAG_AUTH) {
		if (ci->i_auth_cap == NULL ||
		    ceph_seq_cmp(ci->i_auth_cap->mseq, mseq) < 0) {
			ci->i_auth_cap = cap;
			cap->mds_wanted = wanted;
		}
	} else {
		WARN_ON(ci->i_auth_cap == cap);
	}

	dout("add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n",
	     inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
	     ceph_cap_string(issued|cap->issued), seq, mds);
	cap->cap_id = cap_id;
	cap->issued = issued;
	cap->implemented |= issued;
	if (ceph_seq_cmp(mseq, cap->mseq) > 0)
		cap->mds_wanted = wanted;
	else
		cap->mds_wanted |= wanted;
	cap->seq = seq;
	cap->issue_seq = seq;
	cap->mseq = mseq;
	cap->cap_gen = session->s_cap_gen;

	if (fmode >= 0)
		__ceph_get_fmode(ci, fmode);
}

/*
 * Return true if cap has not timed out and belongs to the current
 * generation of the MDS session (i.e. has not gone 'stale' due to
 * us losing touch with the mds).
 */
static int __cap_is_valid(struct ceph_cap *cap)
{
	unsigned long ttl;
	u32 gen;

	spin_lock(&cap->session->s_gen_ttl_lock);
	gen = cap->session->s_cap_gen;
	ttl = cap->session->s_cap_ttl;
	spin_unlock(&cap->session->s_gen_ttl_lock);

	if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) {
		dout("__cap_is_valid %p cap %p issued %s "
		     "but STALE (gen %u vs %u)\n", &cap->ci->vfs_inode,
		     cap, ceph_cap_string(cap->issued), cap->cap_gen, gen);
		return 0;
	}

	return 1;
}

/*
 * Return set of valid cap bits issued to us.  Note that caps time
 * out, and may be invalidated in bulk if the client session times out
 * and session->s_cap_gen is bumped.
 */
int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
{
	int have = ci->i_snap_caps;
	struct ceph_cap *cap;
	struct rb_node *p;

	if (implemented)
		*implemented = 0;
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (!__cap_is_valid(cap))
			continue;
		dout("__ceph_caps_issued %p cap %p issued %s\n",
		     &ci->vfs_inode, cap, ceph_cap_string(cap->issued));
		have |= cap->issued;
		if (implemented)
			*implemented |= cap->implemented;
	}
	/*
	 * Exclude caps issued by a non-auth MDS that are being revoked
	 * by the auth MDS.  The non-auth MDS should be revoking/exporting
	 * these caps, but its message may be delayed.
	 */
	if (ci->i_auth_cap) {
		cap = ci->i_auth_cap;
		have &= ~cap->implemented | cap->issued;
	}
	return have;
}

/*
 * Get cap bits issued by caps other than @ocap
 */
int __ceph_caps_issued_other(struct ceph_inode_info *ci, struct ceph_cap *ocap)
{
	int have = ci->i_snap_caps;
	struct ceph_cap *cap;
	struct rb_node *p;

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (cap == ocap)
			continue;
		if (!__cap_is_valid(cap))
			continue;
		have |= cap->issued;
	}
	return have;
}

/*
 * Move a cap to the end of the LRU (oldest caps at list head, newest
 * at list tail).
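 * The per-session cap list doubles as this LRU; __touch_cap() skips
 * the move while another thread is iterating over the session's caps.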
 */
static void __touch_cap(struct ceph_cap *cap)
{
	struct ceph_mds_session *s = cap->session;

	spin_lock(&s->s_cap_lock);
	if (s->s_cap_iterator == NULL) {
		dout("__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap,
		     s->s_mds);
		list_move_tail(&cap->session_caps, &s->s_caps);
	} else {
		dout("__touch_cap %p cap %p mds%d NOP, iterating over caps\n",
		     &cap->ci->vfs_inode, cap, s->s_mds);
	}
	spin_unlock(&s->s_cap_lock);
}

/*
 * Check if we hold the given mask.  If so, move the cap(s) to the
 * tail (newest end) of their respective session LRUs.  (This is the
 * preferred way for callers to check for caps they want.)
 */
int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
{
	struct ceph_cap *cap;
	struct rb_node *p;
	int have = ci->i_snap_caps;

	if ((have & mask) == mask) {
		dout("__ceph_caps_issued_mask %p snap issued %s"
		     " (mask %s)\n", &ci->vfs_inode,
		     ceph_cap_string(have),
		     ceph_cap_string(mask));
		return 1;
	}

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (!__cap_is_valid(cap))
			continue;
		if ((cap->issued & mask) == mask) {
			dout("__ceph_caps_issued_mask %p cap %p issued %s"
			     " (mask %s)\n", &ci->vfs_inode, cap,
			     ceph_cap_string(cap->issued),
			     ceph_cap_string(mask));
			if (touch)
				__touch_cap(cap);
			return 1;
		}

		/* does a combination of caps satisfy mask? */
		have |= cap->issued;
		if ((have & mask) == mask) {
			dout("__ceph_caps_issued_mask %p combo issued %s"
			     " (mask %s)\n", &ci->vfs_inode,
			     ceph_cap_string(cap->issued),
			     ceph_cap_string(mask));
			if (touch) {
				struct rb_node *q;

				/* touch this + preceding caps */
				__touch_cap(cap);
				for (q = rb_first(&ci->i_caps); q != p;
				     q = rb_next(q)) {
					cap = rb_entry(q, struct ceph_cap,
						       ci_node);
					if (!__cap_is_valid(cap))
						continue;
					__touch_cap(cap);
				}
			}
			return 1;
		}
	}

	return 0;
}

/*
 * Return true if mask caps are currently being revoked by an MDS.
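 * That is, @mask overlaps bits that some cap other than @ocap has
 * implemented but is no longer issued.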
 */
int __ceph_caps_revoking_other(struct ceph_inode_info *ci,
			       struct ceph_cap *ocap, int mask)
{
	struct ceph_cap *cap;
	struct rb_node *p;

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (cap != ocap &&
		    (cap->implemented & ~cap->issued & mask))
			return 1;
	}
	return 0;
}

int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
{
	struct inode *inode = &ci->vfs_inode;
	int ret;

	spin_lock(&ci->i_ceph_lock);
	ret = __ceph_caps_revoking_other(ci, NULL, mask);
	spin_unlock(&ci->i_ceph_lock);
	dout("ceph_caps_revoking %p %s = %d\n", inode,
	     ceph_cap_string(mask), ret);
	return ret;
}

int __ceph_caps_used(struct ceph_inode_info *ci)
{
	int used = 0;

	if (ci->i_pin_ref)
		used |= CEPH_CAP_PIN;
	if (ci->i_rd_ref)
		used |= CEPH_CAP_FILE_RD;
	if (ci->i_rdcache_ref ||
	    (!S_ISDIR(ci->vfs_inode.i_mode) && /* ignore readdir cache */
	     ci->vfs_inode.i_data.nrpages))
		used |= CEPH_CAP_FILE_CACHE;
	if (ci->i_wr_ref)
		used |= CEPH_CAP_FILE_WR;
	if (ci->i_wb_ref || ci->i_wrbuffer_ref)
		used |= CEPH_CAP_FILE_BUFFER;
	return used;
}

/*
 * wanted, by virtue of open file modes
 */
int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
{
	int i, bits = 0;

	for (i = 0; i < CEPH_FILE_MODE_BITS; i++) {
		if (ci->i_nr_by_mode[i])
			bits |= 1 << i;
	}
	if (bits == 0)
		return 0;
	return ceph_caps_for_mode(bits >> 1);
}

/*
 * Return caps we have registered with the MDS(s) as 'wanted'.
 */
int __ceph_caps_mds_wanted(struct ceph_inode_info *ci, bool check)
{
	struct ceph_cap *cap;
	struct rb_node *p;
	int mds_wanted = 0;

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (check && !__cap_is_valid(cap))
			continue;
		if (cap == ci->i_auth_cap)
			mds_wanted |= cap->mds_wanted;
		else
			mds_wanted |= (cap->mds_wanted & ~CEPH_CAP_ANY_FILE_WR);
	}
	return mds_wanted;
}

/*
 * called under i_ceph_lock
 */
static int __ceph_is_any_caps(struct ceph_inode_info *ci)
{
	return !RB_EMPTY_ROOT(&ci->i_caps);
}

int ceph_is_any_caps(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	spin_lock(&ci->i_ceph_lock);
	ret = __ceph_is_any_caps(ci);
	spin_unlock(&ci->i_ceph_lock);

	return ret;
}

static void drop_inode_snap_realm(struct ceph_inode_info *ci)
{
	struct ceph_snap_realm *realm = ci->i_snap_realm;

	spin_lock(&realm->inodes_with_caps_lock);
	list_del_init(&ci->i_snap_realm_item);
	ci->i_snap_realm_counter++;
	ci->i_snap_realm = NULL;
	spin_unlock(&realm->inodes_with_caps_lock);
	ceph_put_snap_realm(ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc,
			    realm);
}

/*
 * Remove a cap.  Take steps to deal with a racing iterate_session_caps.
 *
 * caller should hold i_ceph_lock.
 * caller will not hold session s_mutex if called from destroy_inode.
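 *
 * If the session's cap iterator is currently parked on this cap, its
 * removal from the session cap list is left to the iterator; we only
 * detach the cap from the inode here.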
 */
void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
{
	struct ceph_mds_session *session = cap->session;
	struct ceph_inode_info *ci = cap->ci;
	struct ceph_mds_client *mdsc =
		ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
	int removed = 0;

	dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);

	/* remove from session list */
	spin_lock(&session->s_cap_lock);
	if (session->s_cap_iterator == cap) {
		/* not yet, we are iterating over this very cap */
		dout("__ceph_remove_cap delaying %p removal from session %p\n",
		     cap, cap->session);
	} else {
		list_del_init(&cap->session_caps);
		session->s_nr_caps--;
		cap->session = NULL;
		removed = 1;
	}
	/* protect backpointer with s_cap_lock: see iterate_session_caps */
	cap->ci = NULL;

	/*
	 * s_cap_reconnect is protected by s_cap_lock.  No one changes
	 * s_cap_gen while the session is in the reconnect state.
	 */
	if (queue_release &&
	    (!session->s_cap_reconnect || cap->cap_gen == session->s_cap_gen)) {
		cap->queue_release = 1;
		if (removed) {
			list_add_tail(&cap->session_caps,
				      &session->s_cap_releases);
			session->s_num_cap_releases++;
			removed = 0;
		}
	} else {
		cap->queue_release = 0;
	}
	cap->cap_ino = ci->i_vino.ino;

	spin_unlock(&session->s_cap_lock);

	/* remove from inode list */
	rb_erase(&cap->ci_node, &ci->i_caps);
	if (ci->i_auth_cap == cap)
		ci->i_auth_cap = NULL;

	if (removed)
		ceph_put_cap(mdsc, cap);

	/*
	 * When a reconnect is denied, we remove session caps forcibly,
	 * so i_wr_ref can be non-zero.  If there are ongoing writes,
	 * keep i_snap_realm.
	 */
	if (!__ceph_is_any_caps(ci) && ci->i_wr_ref == 0 && ci->i_snap_realm)
		drop_inode_snap_realm(ci);

	if (!__ceph_is_any_real_caps(ci))
		__cap_delay_cancel(mdsc, ci);
}

struct cap_msg_args {
	struct ceph_mds_session *session;
	u64 ino, cid, follows;
	u64 flush_tid, oldest_flush_tid, size, max_size;
	u64 xattr_version;
	struct ceph_buffer *xattr_buf;
	struct timespec atime, mtime, ctime;
	int op, caps, wanted, dirty;
	u32 seq, issue_seq, mseq, time_warp_seq;
	u32 flags;
	kuid_t uid;
	kgid_t gid;
	umode_t mode;
	bool inline_data;
};

/*
 * Build and send a cap message to the given MDS.
 *
 * Caller should be holding s_mutex.
 */
static int send_cap_msg(struct cap_msg_args *arg)
{
	struct ceph_mds_caps *fc;
	struct ceph_msg *msg;
	void *p;
	size_t extra_len;
	struct timespec zerotime = {0};
	struct ceph_osd_client *osdc = &arg->session->s_mdsc->fsc->client->osdc;

	dout("send_cap_msg %s %llx %llx caps %s wanted %s dirty %s"
	     " seq %u/%u tid %llu/%llu mseq %u follows %lld size %llu/%llu"
	     " xattr_ver %llu xattr_len %d\n", ceph_cap_op_name(arg->op),
	     arg->cid, arg->ino, ceph_cap_string(arg->caps),
	     ceph_cap_string(arg->wanted), ceph_cap_string(arg->dirty),
	     arg->seq, arg->issue_seq, arg->flush_tid, arg->oldest_flush_tid,
	     arg->mseq, arg->follows, arg->size, arg->max_size,
	     arg->xattr_version,
	     arg->xattr_buf ?
	     (int)arg->xattr_buf->vec.iov_len : 0);

	/* flock buffer size + inline version + inline data size +
	 * osd_epoch_barrier + oldest_flush_tid */
	extra_len = 4 + 8 + 4 + 4 + 8 + 4 + 4 + 4 + 8 + 8 + 4;
	msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc) + extra_len,
			   GFP_NOFS, false);
	if (!msg)
		return -ENOMEM;

	msg->hdr.version = cpu_to_le16(10);
	msg->hdr.tid = cpu_to_le64(arg->flush_tid);

	fc = msg->front.iov_base;
	memset(fc, 0, sizeof(*fc));

	fc->cap_id = cpu_to_le64(arg->cid);
	fc->op = cpu_to_le32(arg->op);
	fc->seq = cpu_to_le32(arg->seq);
	fc->issue_seq = cpu_to_le32(arg->issue_seq);
	fc->migrate_seq = cpu_to_le32(arg->mseq);
	fc->caps = cpu_to_le32(arg->caps);
	fc->wanted = cpu_to_le32(arg->wanted);
	fc->dirty = cpu_to_le32(arg->dirty);
	fc->ino = cpu_to_le64(arg->ino);
	fc->snap_follows = cpu_to_le64(arg->follows);

	fc->size = cpu_to_le64(arg->size);
	fc->max_size = cpu_to_le64(arg->max_size);
	ceph_encode_timespec(&fc->mtime, &arg->mtime);
	ceph_encode_timespec(&fc->atime, &arg->atime);
	ceph_encode_timespec(&fc->ctime, &arg->ctime);
	fc->time_warp_seq = cpu_to_le32(arg->time_warp_seq);

	fc->uid = cpu_to_le32(from_kuid(&init_user_ns, arg->uid));
	fc->gid = cpu_to_le32(from_kgid(&init_user_ns, arg->gid));
	fc->mode = cpu_to_le32(arg->mode);

	fc->xattr_version = cpu_to_le64(arg->xattr_version);
	if (arg->xattr_buf) {
		msg->middle = ceph_buffer_get(arg->xattr_buf);
		fc->xattr_len = cpu_to_le32(arg->xattr_buf->vec.iov_len);
		msg->hdr.middle_len = cpu_to_le32(arg->xattr_buf->vec.iov_len);
	}

	p = fc + 1;
	/* flock buffer size (version 2) */
	ceph_encode_32(&p, 0);
	/* inline version (version 4) */
	ceph_encode_64(&p, arg->inline_data ? 0 : CEPH_INLINE_NONE);
	/* inline data size */
	ceph_encode_32(&p, 0);
	/*
	 * osd_epoch_barrier (version 5)
	 * The epoch_barrier is protected by osdc->lock, so READ_ONCE here in
	 * case it was recently changed
	 */
	ceph_encode_32(&p, READ_ONCE(osdc->epoch_barrier));
	/* oldest_flush_tid (version 6) */
	ceph_encode_64(&p, arg->oldest_flush_tid);

	/*
	 * caller_uid/caller_gid (version 7)
	 *
	 * Currently, we don't properly track which caller dirtied the caps
	 * last, and force a flush of them when there is a conflict.  For now,
	 * just set this to 0:0, to emulate how the MDS has worked up to now.
	 */
	ceph_encode_32(&p, 0);
	ceph_encode_32(&p, 0);

	/* pool namespace (version 8) (mds always ignores this) */
	ceph_encode_32(&p, 0);

	/*
	 * btime and change_attr (version 9)
	 *
	 * We just zero these out for now, as the MDS ignores them unless
	 * the requisite feature flags are set (which we don't do yet).
	 */
	ceph_encode_timespec(p, &zerotime);
	p += sizeof(struct ceph_timespec);
	ceph_encode_64(&p, 0);

	/* Advisory flags (version 10) */
	ceph_encode_32(&p, arg->flags);

	ceph_con_send(&arg->session->s_con, msg);
	return 0;
}

/*
 * Queue cap releases when an inode is dropped from our cache.  Since
 * the inode is about to be destroyed, there is no need for i_ceph_lock.
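 * Each removed cap is parked on its session's s_cap_releases list and
 * sent back to the MDS in a later batch.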
 */
void ceph_queue_caps_release(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct rb_node *p;

	p = rb_first(&ci->i_caps);
	while (p) {
		struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
		p = rb_next(p);
		__ceph_remove_cap(cap, true);
	}
}

/*
 * Send a cap msg on the given inode.  Update our caps state, then
 * drop i_ceph_lock and send the message.
 *
 * Make note of max_size reported/requested from mds, revoked caps
 * that have now been implemented.
 *
 * Make a half-hearted attempt to invalidate the page cache if we are
 * dropping RDCACHE.  Note that this will leave behind locked pages
 * that we'll then need to deal with elsewhere.
 *
 * Return non-zero if delayed release, or we experienced an error
 * such that the caller should requeue + retry later.
 *
 * called with i_ceph_lock, then drops it.
 * caller should hold snap_rwsem (read), s_mutex.
 */
static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
		      int op, bool sync, int used, int want, int retain,
		      int flushing, u64 flush_tid, u64 oldest_flush_tid)
	__releases(cap->ci->i_ceph_lock)
{
	struct ceph_inode_info *ci = cap->ci;
	struct inode *inode = &ci->vfs_inode;
	struct cap_msg_args arg;
	int held, revoking, dropping;
	int wake = 0;
	int delayed = 0;
	int ret;

	held = cap->issued | cap->implemented;
	revoking = cap->implemented & ~cap->issued;
	retain &= ~revoking;
	dropping = cap->issued & ~retain;

	dout("__send_cap %p cap %p session %p %s -> %s (revoking %s)\n",
	     inode, cap, cap->session,
	     ceph_cap_string(held), ceph_cap_string(held & retain),
	     ceph_cap_string(revoking));
	BUG_ON((retain & CEPH_CAP_PIN) == 0);

	arg.session = cap->session;

	/* don't release wanted unless we've waited a bit. */
	if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
	    time_before(jiffies, ci->i_hold_caps_min)) {
		dout(" delaying issued %s -> %s, wanted %s -> %s on send\n",
		     ceph_cap_string(cap->issued),
		     ceph_cap_string(cap->issued & retain),
		     ceph_cap_string(cap->mds_wanted),
		     ceph_cap_string(want));
		want |= cap->mds_wanted;
		retain |= cap->issued;
		delayed = 1;
	}
	ci->i_ceph_flags &= ~(CEPH_I_NODELAY | CEPH_I_FLUSH);
	if (want & ~cap->mds_wanted) {
		/*
		 * User space may open/close a single file frequently.
		 * This avoids dropping mds_wanted immediately after
		 * requesting new mds_wanted.
		 */
		__cap_set_timeouts(mdsc, ci);
	}

	cap->issued &= retain;	/* drop bits we don't want */
	if (cap->implemented & ~cap->issued) {
		/*
		 * Wake up any waiters on wanted -> needed transition.
		 * This is due to the weird transition from buffered
		 * to sync IO... we need to flush dirty pages _before_
		 * allowing sync writes to avoid reordering.
		 */
		wake = 1;
	}
	cap->implemented &= cap->issued | used;
	cap->mds_wanted = want;

	arg.ino = ceph_vino(inode).ino;
	arg.cid = cap->cap_id;
	arg.follows = flushing ?
		      ci->i_head_snapc->seq : 0;
	arg.flush_tid = flush_tid;
	arg.oldest_flush_tid = oldest_flush_tid;

	arg.size = inode->i_size;
	ci->i_reported_size = arg.size;
	arg.max_size = ci->i_wanted_max_size;
	ci->i_requested_max_size = arg.max_size;

	if (flushing & CEPH_CAP_XATTR_EXCL) {
		__ceph_build_xattrs_blob(ci);
		arg.xattr_version = ci->i_xattrs.version;
		arg.xattr_buf = ci->i_xattrs.blob;
	} else {
		arg.xattr_buf = NULL;
	}

	arg.mtime = inode->i_mtime;
	arg.atime = inode->i_atime;
	arg.ctime = inode->i_ctime;

	arg.op = op;
	arg.caps = cap->implemented;
	arg.wanted = want;
	arg.dirty = flushing;

	arg.seq = cap->seq;
	arg.issue_seq = cap->issue_seq;
	arg.mseq = cap->mseq;
	arg.time_warp_seq = ci->i_time_warp_seq;

	arg.uid = inode->i_uid;
	arg.gid = inode->i_gid;
	arg.mode = inode->i_mode;

	arg.inline_data = ci->i_inline_version != CEPH_INLINE_NONE;
	arg.flags = 0;
	if (sync)
		arg.flags |= CEPH_CLIENT_CAPS_SYNC;

	spin_unlock(&ci->i_ceph_lock);

	ret = send_cap_msg(&arg);
	if (ret < 0) {
		dout("error sending cap msg, must requeue %p\n", inode);
		delayed = 1;
	}

	if (wake)
		wake_up_all(&ci->i_cap_wq);

	return delayed;
}

static inline int __send_flush_snap(struct inode *inode,
				    struct ceph_mds_session *session,
				    struct ceph_cap_snap *capsnap,
				    u32 mseq, u64 oldest_flush_tid)
{
	struct cap_msg_args arg;

	arg.session = session;
	arg.ino = ceph_vino(inode).ino;
	arg.cid = 0;
	arg.follows = capsnap->follows;
	arg.flush_tid = capsnap->cap_flush.tid;
	arg.oldest_flush_tid = oldest_flush_tid;

	arg.size = capsnap->size;
	arg.max_size = 0;
	arg.xattr_version = capsnap->xattr_version;
	arg.xattr_buf = capsnap->xattr_blob;

	arg.atime = capsnap->atime;
	arg.mtime = capsnap->mtime;
	arg.ctime = capsnap->ctime;

	arg.op = CEPH_CAP_OP_FLUSHSNAP;
	arg.caps = capsnap->issued;
	arg.wanted = 0;
	arg.dirty = capsnap->dirty;

	arg.seq = 0;
	arg.issue_seq = 0;
	arg.mseq = mseq;
	arg.time_warp_seq = capsnap->time_warp_seq;

	arg.uid = capsnap->uid;
	arg.gid = capsnap->gid;
	arg.mode = capsnap->mode;

	arg.inline_data = capsnap->inline_data;
	arg.flags = 0;

	return send_cap_msg(&arg);
}

/*
 * When a snapshot is taken, clients accumulate dirty metadata on
 * inodes with capabilities in ceph_cap_snaps to describe the file
 * state at the time the snapshot was taken.  This must be flushed
 * asynchronously back to the MDS once sync writes complete and dirty
 * data is written out.
 *
 * Called under i_ceph_lock.  Takes s_mutex as needed.
 */
static void __ceph_flush_snaps(struct ceph_inode_info *ci,
			       struct ceph_mds_session *session)
		__releases(ci->i_ceph_lock)
		__acquires(ci->i_ceph_lock)
{
	struct inode *inode = &ci->vfs_inode;
	struct ceph_mds_client *mdsc = session->s_mdsc;
	struct ceph_cap_snap *capsnap;
	u64 oldest_flush_tid = 0;
	u64 first_tid = 1, last_tid = 0;

	dout("__flush_snaps %p session %p\n", inode, session);

	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		/*
		 * We need to wait for sync writes to complete and for dirty
		 * pages to be written out.
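		 * Capsnaps are kept in snapshot order, so stopping at
		 * the first busy one preserves per-inode flush ordering.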
		 */
		if (capsnap->dirty_pages || capsnap->writing)
			break;

		/* should be removed by ceph_try_drop_cap_snap() */
		BUG_ON(!capsnap->need_flush);

		/* only flush each capsnap once */
		if (capsnap->cap_flush.tid > 0) {
			dout(" already flushed %p, skipping\n", capsnap);
			continue;
		}

		spin_lock(&mdsc->cap_dirty_lock);
		capsnap->cap_flush.tid = ++mdsc->last_cap_flush_tid;
		list_add_tail(&capsnap->cap_flush.g_list,
			      &mdsc->cap_flush_list);
		if (oldest_flush_tid == 0)
			oldest_flush_tid = __get_oldest_flush_tid(mdsc);
		if (list_empty(&ci->i_flushing_item)) {
			list_add_tail(&ci->i_flushing_item,
				      &session->s_cap_flushing);
		}
		spin_unlock(&mdsc->cap_dirty_lock);

		list_add_tail(&capsnap->cap_flush.i_list,
			      &ci->i_cap_flush_list);

		if (first_tid == 1)
			first_tid = capsnap->cap_flush.tid;
		last_tid = capsnap->cap_flush.tid;
	}

	ci->i_ceph_flags &= ~CEPH_I_FLUSH_SNAPS;

	while (first_tid <= last_tid) {
		struct ceph_cap *cap = ci->i_auth_cap;
		struct ceph_cap_flush *cf;
		int ret;

		if (!(cap && cap->session == session)) {
			dout("__flush_snaps %p auth cap %p not mds%d, "
			     "stop\n", inode, cap, session->s_mds);
			break;
		}

		ret = -ENOENT;
		list_for_each_entry(cf, &ci->i_cap_flush_list, i_list) {
			if (cf->tid >= first_tid) {
				ret = 0;
				break;
			}
		}
		if (ret < 0)
			break;

		first_tid = cf->tid + 1;

		capsnap = container_of(cf, struct ceph_cap_snap, cap_flush);
		refcount_inc(&capsnap->nref);
		spin_unlock(&ci->i_ceph_lock);

		dout("__flush_snaps %p capsnap %p tid %llu %s\n",
		     inode, capsnap, cf->tid, ceph_cap_string(capsnap->dirty));

		ret = __send_flush_snap(inode, session, capsnap, cap->mseq,
					oldest_flush_tid);
		if (ret < 0) {
			pr_err("__flush_snaps: error sending cap flushsnap, "
			       "ino (%llx.%llx) tid %llu follows %llu\n",
			       ceph_vinop(inode), cf->tid, capsnap->follows);
		}

		ceph_put_cap_snap(capsnap);
		spin_lock(&ci->i_ceph_lock);
	}
}

void ceph_flush_snaps(struct ceph_inode_info *ci,
		      struct ceph_mds_session **psession)
{
	struct inode *inode = &ci->vfs_inode;
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	struct ceph_mds_session *session = NULL;
	int mds;

	dout("ceph_flush_snaps %p\n", inode);
	if (psession)
		session = *psession;
retry:
	spin_lock(&ci->i_ceph_lock);
	if (!(ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS)) {
		dout(" no capsnap needs flush, doing nothing\n");
		goto out;
	}
	if (!ci->i_auth_cap) {
		dout(" no auth cap (migrating?), doing nothing\n");
		goto out;
	}

	mds = ci->i_auth_cap->session->s_mds;
	if (session && session->s_mds != mds) {
		dout(" oops, wrong session %p mutex\n", session);
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
		session = NULL;
	}
	if (!session) {
		spin_unlock(&ci->i_ceph_lock);
		mutex_lock(&mdsc->mutex);
		session = __ceph_lookup_mds_session(mdsc, mds);
		mutex_unlock(&mdsc->mutex);
		if (session) {
			dout(" inverting session/ino locks on %p\n", session);
			mutex_lock(&session->s_mutex);
		}
		goto retry;
	}

	__ceph_flush_snaps(ci, session);
out:
	spin_unlock(&ci->i_ceph_lock);

	if (psession) {
		*psession = session;
	} else if (session) {
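		/* we acquired the session ourselves; drop the mutex and ref */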
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
	}
	/* we flushed them all; remove this inode from the queue */
	spin_lock(&mdsc->snap_flush_lock);
	list_del_init(&ci->i_snap_flush_item);
	spin_unlock(&mdsc->snap_flush_lock);
}

/*
 * Mark caps dirty.  If inode is newly dirty, return the dirty flags.
 * Caller is then responsible for calling __mark_inode_dirty with the
 * returned flags value.
 */
int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
			   struct ceph_cap_flush **pcf)
{
	struct ceph_mds_client *mdsc =
		ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
	struct inode *inode = &ci->vfs_inode;
	int was = ci->i_dirty_caps;
	int dirty = 0;

	if (!ci->i_auth_cap) {
		pr_warn("__mark_dirty_caps %p %llx mask %s, "
			"but no auth cap (session was closed?)\n",
			inode, ceph_ino(inode), ceph_cap_string(mask));
		return 0;
	}

	dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode,
	     ceph_cap_string(mask), ceph_cap_string(was),
	     ceph_cap_string(was | mask));
	ci->i_dirty_caps |= mask;
	if (was == 0) {
		WARN_ON_ONCE(ci->i_prealloc_cap_flush);
		swap(ci->i_prealloc_cap_flush, *pcf);

		if (!ci->i_head_snapc) {
			WARN_ON_ONCE(!rwsem_is_locked(&mdsc->snap_rwsem));
			ci->i_head_snapc = ceph_get_snap_context(
				ci->i_snap_realm->cached_context);
		}
		dout(" inode %p now dirty snapc %p auth cap %p\n",
		     &ci->vfs_inode, ci->i_head_snapc, ci->i_auth_cap);
		BUG_ON(!list_empty(&ci->i_dirty_item));
		spin_lock(&mdsc->cap_dirty_lock);
		list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
		spin_unlock(&mdsc->cap_dirty_lock);
		if (ci->i_flushing_caps == 0) {
			ihold(inode);
			dirty |= I_DIRTY_SYNC;
		}
	} else {
		WARN_ON_ONCE(!ci->i_prealloc_cap_flush);
	}
	BUG_ON(list_empty(&ci->i_dirty_item));
	if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) &&
	    (mask & CEPH_CAP_FILE_BUFFER))
		dirty |= I_DIRTY_DATASYNC;
	__cap_delay_requeue(mdsc, ci);
	return dirty;
}

struct ceph_cap_flush *ceph_alloc_cap_flush(void)
{
	return kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL);
}

void ceph_free_cap_flush(struct ceph_cap_flush *cf)
{
	if (cf)
		kmem_cache_free(ceph_cap_flush_cachep, cf);
}

static u64 __get_oldest_flush_tid(struct ceph_mds_client *mdsc)
{
	if (!list_empty(&mdsc->cap_flush_list)) {
		struct ceph_cap_flush *cf =
			list_first_entry(&mdsc->cap_flush_list,
					 struct ceph_cap_flush, g_list);
		return cf->tid;
	}
	return 0;
}

/*
 * Remove cap_flush from the mdsc's or inode's flushing cap list.
 * Return true if caller needs to wake up flush waiters.
 */
static bool __finish_cap_flush(struct ceph_mds_client *mdsc,
			       struct ceph_inode_info *ci,
			       struct ceph_cap_flush *cf)
{
	struct ceph_cap_flush *prev;
	bool wake = cf->wake;

	if (mdsc) {
		/* are there older pending cap flushes?
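		 * If so, hand our wake flag to the previous entry so
		 * waiters are woken in flush-tid order.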
		 */
		if (wake && cf->g_list.prev != &mdsc->cap_flush_list) {
			prev = list_prev_entry(cf, g_list);
			prev->wake = true;
			wake = false;
		}
		list_del(&cf->g_list);
	} else if (ci) {
		if (wake && cf->i_list.prev != &ci->i_cap_flush_list) {
			prev = list_prev_entry(cf, i_list);
			prev->wake = true;
			wake = false;
		}
		list_del(&cf->i_list);
	} else {
		BUG_ON(1);
	}
	return wake;
}

/*
 * Add dirty inode to the flushing list.  Assign a seq number so we
 * can wait for caps to flush without starving.
 *
 * Called under i_ceph_lock.
 */
static int __mark_caps_flushing(struct inode *inode,
				struct ceph_mds_session *session, bool wake,
				u64 *flush_tid, u64 *oldest_flush_tid)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap_flush *cf = NULL;
	int flushing;

	BUG_ON(ci->i_dirty_caps == 0);
	BUG_ON(list_empty(&ci->i_dirty_item));
	BUG_ON(!ci->i_prealloc_cap_flush);

	flushing = ci->i_dirty_caps;
	dout("__mark_caps_flushing flushing %s, flushing_caps %s -> %s\n",
	     ceph_cap_string(flushing),
	     ceph_cap_string(ci->i_flushing_caps),
	     ceph_cap_string(ci->i_flushing_caps | flushing));
	ci->i_flushing_caps |= flushing;
	ci->i_dirty_caps = 0;
	dout(" inode %p now !dirty\n", inode);

	swap(cf, ci->i_prealloc_cap_flush);
	cf->caps = flushing;
	cf->wake = wake;

	spin_lock(&mdsc->cap_dirty_lock);
	list_del_init(&ci->i_dirty_item);

	cf->tid = ++mdsc->last_cap_flush_tid;
	list_add_tail(&cf->g_list, &mdsc->cap_flush_list);
	*oldest_flush_tid = __get_oldest_flush_tid(mdsc);

	if (list_empty(&ci->i_flushing_item)) {
		list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing);
		mdsc->num_cap_flushing++;
	}
	spin_unlock(&mdsc->cap_dirty_lock);

	list_add_tail(&cf->i_list, &ci->i_cap_flush_list);

	*flush_tid = cf->tid;
	return flushing;
}

/*
 * try to invalidate mapping pages without blocking.
 */
static int try_nonblocking_invalidate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u32 invalidating_gen = ci->i_rdcache_gen;

	spin_unlock(&ci->i_ceph_lock);
	invalidate_mapping_pages(&inode->i_data, 0, -1);
	spin_lock(&ci->i_ceph_lock);

	if (inode->i_data.nrpages == 0 &&
	    invalidating_gen == ci->i_rdcache_gen) {
		/* success. */
		dout("try_nonblocking_invalidate %p success\n", inode);
		/* save any racing async invalidate some trouble */
		ci->i_rdcache_revoking = ci->i_rdcache_gen - 1;
		return 0;
	}
	dout("try_nonblocking_invalidate %p failed\n", inode);
	return -1;
}

/*
 * Swiss army knife function to examine currently used and wanted
 * versus held caps.  Release, flush, ack revoked caps to mds as
 * appropriate.
 *
 * CHECK_CAPS_NODELAY - caller is delayed work and we should not delay
 *    cap release further.
 * CHECK_CAPS_AUTHONLY - we should only check the auth cap
 * CHECK_CAPS_FLUSH - we should flush any dirty caps immediately, without
 *    further delay.
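 *
 * If @session is non-NULL, the caller holds its s_mutex; it may be
 * dropped here and a different session locked as needed.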
 */
void ceph_check_caps(struct ceph_inode_info *ci, int flags,
		     struct ceph_mds_session *session)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = &ci->vfs_inode;
	struct ceph_cap *cap;
	u64 flush_tid, oldest_flush_tid;
	int file_wanted, used, cap_used;
	int took_snap_rwsem = 0;	/* true if mdsc->snap_rwsem held */
	int issued, implemented, want, retain, revoking, flushing = 0;
	int mds = -1;	/* keep track of how far we've gone through i_caps list
			   to avoid an infinite loop on retry */
	struct rb_node *p;
	int delayed = 0, sent = 0, num;
	bool is_delayed = flags & CHECK_CAPS_NODELAY;
	bool queue_invalidate = false;
	bool force_requeue = false;
	bool tried_invalidate = false;

	/* if we are unmounting, flush any unused caps immediately. */
	if (mdsc->stopping)
		is_delayed = 1;

	spin_lock(&ci->i_ceph_lock);

	if (ci->i_ceph_flags & CEPH_I_FLUSH)
		flags |= CHECK_CAPS_FLUSH;

	goto retry_locked;
retry:
	spin_lock(&ci->i_ceph_lock);
retry_locked:
	file_wanted = __ceph_caps_file_wanted(ci);
	used = __ceph_caps_used(ci);
	issued = __ceph_caps_issued(ci, &implemented);
	revoking = implemented & ~issued;

	want = file_wanted;
	retain = file_wanted | used | CEPH_CAP_PIN;
	if (!mdsc->stopping && inode->i_nlink > 0) {
		if (file_wanted) {
			retain |= CEPH_CAP_ANY;	/* be greedy */
		} else if (S_ISDIR(inode->i_mode) &&
			   (issued & CEPH_CAP_FILE_SHARED) &&
			   __ceph_dir_is_complete(ci)) {
			/*
			 * If a directory is complete, we want to keep
			 * the exclusive cap, so that the MDS does not end up
			 * revoking the shared cap on every create/unlink
			 * operation.
			 */
			want = CEPH_CAP_ANY_SHARED | CEPH_CAP_FILE_EXCL;
			retain |= want;
		} else {
			retain |= CEPH_CAP_ANY_SHARED;
			/*
			 * keep RD only if we didn't have the file open RW,
			 * because then the mds would revoke it anyway to
			 * journal max_size=0.
			 */
			if (ci->i_max_size == 0)
				retain |= CEPH_CAP_ANY_RD;
		}
	}

	dout("check_caps %p file_want %s used %s dirty %s flushing %s"
	     " issued %s revoking %s retain %s %s%s%s\n", inode,
	     ceph_cap_string(file_wanted),
	     ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
	     ceph_cap_string(ci->i_flushing_caps),
	     ceph_cap_string(issued), ceph_cap_string(revoking),
	     ceph_cap_string(retain),
	     (flags & CHECK_CAPS_AUTHONLY) ? " AUTHONLY" : "",
	     (flags & CHECK_CAPS_NODELAY) ? " NODELAY" : "",
	     (flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "");

	/*
	 * If we no longer need to hold onto our old caps, and we may
	 * have cached pages, but don't want them, then try to invalidate.
	 * If we fail, it's because pages are locked.... try again later.
	 */
	if ((!is_delayed || mdsc->stopping) &&
	    !S_ISDIR(inode->i_mode) &&		/* ignore readdir cache */
	    !(ci->i_wb_ref || ci->i_wrbuffer_ref) &&	/* no dirty pages...
							 */
	    inode->i_data.nrpages &&		/* have cached pages */
	    (revoking & (CEPH_CAP_FILE_CACHE|
			 CEPH_CAP_FILE_LAZYIO)) && /* or revoking cache */
	    !tried_invalidate) {
		dout("check_caps trying to invalidate on %p\n", inode);
		if (try_nonblocking_invalidate(inode) < 0) {
			if (revoking & (CEPH_CAP_FILE_CACHE|
					CEPH_CAP_FILE_LAZYIO)) {
				dout("check_caps queuing invalidate\n");
				queue_invalidate = true;
				ci->i_rdcache_revoking = ci->i_rdcache_gen;
			} else {
				dout("check_caps failed to invalidate pages\n");
				/* we failed to invalidate pages.  check these
				   caps again later. */
				force_requeue = true;
				__cap_set_timeouts(mdsc, ci);
			}
		}
		tried_invalidate = true;
		goto retry_locked;
	}

	num = 0;
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		num++;

		/* avoid looping forever */
		if (mds >= cap->mds ||
		    ((flags & CHECK_CAPS_AUTHONLY) && cap != ci->i_auth_cap))
			continue;

		/* NOTE: no side-effects allowed, until we take s_mutex */

		cap_used = used;
		if (ci->i_auth_cap && cap != ci->i_auth_cap)
			cap_used &= ~ci->i_auth_cap->issued;

		revoking = cap->implemented & ~cap->issued;
		dout(" mds%d cap %p used %s issued %s implemented %s revoking %s\n",
		     cap->mds, cap, ceph_cap_string(cap_used),
		     ceph_cap_string(cap->issued),
		     ceph_cap_string(cap->implemented),
		     ceph_cap_string(revoking));

		if (cap == ci->i_auth_cap &&
		    (cap->issued & CEPH_CAP_FILE_WR)) {
			/* request larger max_size from MDS? */
			if (ci->i_wanted_max_size > ci->i_max_size &&
			    ci->i_wanted_max_size > ci->i_requested_max_size) {
				dout("requesting new max_size\n");
				goto ack;
			}

			/* approaching file_max? */
			if ((inode->i_size << 1) >= ci->i_max_size &&
			    (ci->i_reported_size << 1) < ci->i_max_size) {
				dout("i_size approaching max_size\n");
				goto ack;
			}
		}
		/* flush anything dirty? */
		if (cap == ci->i_auth_cap) {
			if ((flags & CHECK_CAPS_FLUSH) && ci->i_dirty_caps) {
				dout("flushing dirty caps\n");
				goto ack;
			}
			if (ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS) {
				dout("flushing snap caps\n");
				goto ack;
			}
		}

		/* completed revocation? going down and there are no caps? */
		if (revoking && (revoking & cap_used) == 0) {
			dout("completed revocation of %s\n",
			     ceph_cap_string(cap->implemented & ~cap->issued));
			goto ack;
		}

		/* want more caps from mds? */
		if (want & ~(cap->mds_wanted | cap->issued))
			goto ack;

		/* things we might delay */
		if ((cap->issued & ~retain) == 0 &&
		    cap->mds_wanted == want)
			continue;	/* nope, all good */

		if (is_delayed)
			goto ack;

		/* delay?
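		 * (no NODELAY hint, and the max hold timer armed by
		 * __cap_set_timeouts() hasn't expired yet)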
		 */
		if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
		    time_before(jiffies, ci->i_hold_caps_max)) {
			dout(" delaying issued %s -> %s, wanted %s -> %s\n",
			     ceph_cap_string(cap->issued),
			     ceph_cap_string(cap->issued & retain),
			     ceph_cap_string(cap->mds_wanted),
			     ceph_cap_string(want));
			delayed++;
			continue;
		}

ack:
		if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
			dout(" skipping %p I_NOFLUSH set\n", inode);
			continue;
		}

		if (session && session != cap->session) {
			dout("oops, wrong session %p mutex\n", session);
			mutex_unlock(&session->s_mutex);
			session = NULL;
		}
		if (!session) {
			session = cap->session;
			if (mutex_trylock(&session->s_mutex) == 0) {
				dout("inverting session/ino locks on %p\n",
				     session);
				spin_unlock(&ci->i_ceph_lock);
				if (took_snap_rwsem) {
					up_read(&mdsc->snap_rwsem);
					took_snap_rwsem = 0;
				}
				mutex_lock(&session->s_mutex);
				goto retry;
			}
		}

		/* kick flushing and flush snaps before sending normal
		 * cap message */
		if (cap == ci->i_auth_cap &&
		    (ci->i_ceph_flags &
		     (CEPH_I_KICK_FLUSH | CEPH_I_FLUSH_SNAPS))) {
			if (ci->i_ceph_flags & CEPH_I_KICK_FLUSH) {
				spin_lock(&mdsc->cap_dirty_lock);
				oldest_flush_tid = __get_oldest_flush_tid(mdsc);
				spin_unlock(&mdsc->cap_dirty_lock);
				__kick_flushing_caps(mdsc, session, ci,
						     oldest_flush_tid);
				ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
			}
			if (ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS)
				__ceph_flush_snaps(ci, session);

			goto retry_locked;
		}

		/* take snap_rwsem after session mutex */
		if (!took_snap_rwsem) {
			if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
				dout("inverting snap/in locks on %p\n",
				     inode);
				spin_unlock(&ci->i_ceph_lock);
				down_read(&mdsc->snap_rwsem);
				took_snap_rwsem = 1;
				goto retry;
			}
			took_snap_rwsem = 1;
		}

		if (cap == ci->i_auth_cap && ci->i_dirty_caps) {
			flushing = __mark_caps_flushing(inode, session, false,
							&flush_tid,
							&oldest_flush_tid);
		} else {
			flushing = 0;
			flush_tid = 0;
			spin_lock(&mdsc->cap_dirty_lock);
			oldest_flush_tid = __get_oldest_flush_tid(mdsc);
			spin_unlock(&mdsc->cap_dirty_lock);
		}

		mds = cap->mds;	/* remember mds, so we don't repeat */
		sent++;

		/* __send_cap drops i_ceph_lock */
		delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, false,
				      cap_used, want, retain, flushing,
				      flush_tid, oldest_flush_tid);
		goto retry;	/* retake i_ceph_lock and restart our cap scan. */
	}

	/*
	 * Reschedule delayed caps release if we delayed anything,
	 * otherwise cancel.
	 */
	if (delayed && is_delayed)
		force_requeue = true;	/* __send_cap delayed release; requeue */
	if (!delayed && !is_delayed)
		__cap_delay_cancel(mdsc, ci);
	else if (!is_delayed || force_requeue)
		__cap_delay_requeue(mdsc, ci);

	spin_unlock(&ci->i_ceph_lock);

	if (queue_invalidate)
		ceph_queue_invalidate(inode);

	if (session)
		mutex_unlock(&session->s_mutex);
	if (took_snap_rwsem)
		up_read(&mdsc->snap_rwsem);
}

/*
 * Try to flush dirty caps back to the auth mds.
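 * Returns the set of caps being flushed and stores the flush tid to
 * wait on in @ptid; used by the fsync path below.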
1962 */ 1963 static int try_flush_caps(struct inode *inode, u64 *ptid) 1964 { 1965 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; 1966 struct ceph_inode_info *ci = ceph_inode(inode); 1967 struct ceph_mds_session *session = NULL; 1968 int flushing = 0; 1969 u64 flush_tid = 0, oldest_flush_tid = 0; 1970 1971 retry: 1972 spin_lock(&ci->i_ceph_lock); 1973 if (ci->i_ceph_flags & CEPH_I_NOFLUSH) { 1974 dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode); 1975 goto out; 1976 } 1977 if (ci->i_dirty_caps && ci->i_auth_cap) { 1978 struct ceph_cap *cap = ci->i_auth_cap; 1979 int used = __ceph_caps_used(ci); 1980 int want = __ceph_caps_wanted(ci); 1981 int delayed; 1982 1983 if (!session || session != cap->session) { 1984 spin_unlock(&ci->i_ceph_lock); 1985 if (session) 1986 mutex_unlock(&session->s_mutex); 1987 session = cap->session; 1988 mutex_lock(&session->s_mutex); 1989 goto retry; 1990 } 1991 if (cap->session->s_state < CEPH_MDS_SESSION_OPEN) 1992 goto out; 1993 1994 flushing = __mark_caps_flushing(inode, session, true, 1995 &flush_tid, &oldest_flush_tid); 1996 1997 /* __send_cap drops i_ceph_lock */ 1998 delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, true, 1999 used, want, (cap->issued | cap->implemented), 2000 flushing, flush_tid, oldest_flush_tid); 2001 2002 if (delayed) { 2003 spin_lock(&ci->i_ceph_lock); 2004 __cap_delay_requeue(mdsc, ci); 2005 spin_unlock(&ci->i_ceph_lock); 2006 } 2007 } else { 2008 if (!list_empty(&ci->i_cap_flush_list)) { 2009 struct ceph_cap_flush *cf = 2010 list_last_entry(&ci->i_cap_flush_list, 2011 struct ceph_cap_flush, i_list); 2012 cf->wake = true; 2013 flush_tid = cf->tid; 2014 } 2015 flushing = ci->i_flushing_caps; 2016 spin_unlock(&ci->i_ceph_lock); 2017 } 2018 out: 2019 if (session) 2020 mutex_unlock(&session->s_mutex); 2021 2022 *ptid = flush_tid; 2023 return flushing; 2024 } 2025 2026 /* 2027 * Return true if we've flushed caps through the given flush_tid. 2028 */ 2029 static int caps_are_flushed(struct inode *inode, u64 flush_tid) 2030 { 2031 struct ceph_inode_info *ci = ceph_inode(inode); 2032 int ret = 1; 2033 2034 spin_lock(&ci->i_ceph_lock); 2035 if (!list_empty(&ci->i_cap_flush_list)) { 2036 struct ceph_cap_flush * cf = 2037 list_first_entry(&ci->i_cap_flush_list, 2038 struct ceph_cap_flush, i_list); 2039 if (cf->tid <= flush_tid) 2040 ret = 0; 2041 } 2042 spin_unlock(&ci->i_ceph_lock); 2043 return ret; 2044 } 2045 2046 /* 2047 * wait for any unsafe requests to complete. 2048 */ 2049 static int unsafe_request_wait(struct inode *inode) 2050 { 2051 struct ceph_inode_info *ci = ceph_inode(inode); 2052 struct ceph_mds_request *req1 = NULL, *req2 = NULL; 2053 int ret, err = 0; 2054 2055 spin_lock(&ci->i_unsafe_lock); 2056 if (S_ISDIR(inode->i_mode) && !list_empty(&ci->i_unsafe_dirops)) { 2057 req1 = list_last_entry(&ci->i_unsafe_dirops, 2058 struct ceph_mds_request, 2059 r_unsafe_dir_item); 2060 ceph_mdsc_get_request(req1); 2061 } 2062 if (!list_empty(&ci->i_unsafe_iops)) { 2063 req2 = list_last_entry(&ci->i_unsafe_iops, 2064 struct ceph_mds_request, 2065 r_unsafe_target_item); 2066 ceph_mdsc_get_request(req2); 2067 } 2068 spin_unlock(&ci->i_unsafe_lock); 2069 2070 dout("unsafe_request_wait %p wait on tid %llu %llu\n", 2071 inode, req1 ? req1->r_tid : 0ULL, req2 ? 
req2->r_tid : 0ULL); 2072 if (req1) { 2073 ret = !wait_for_completion_timeout(&req1->r_safe_completion, 2074 ceph_timeout_jiffies(req1->r_timeout)); 2075 if (ret) 2076 err = -EIO; 2077 ceph_mdsc_put_request(req1); 2078 } 2079 if (req2) { 2080 ret = !wait_for_completion_timeout(&req2->r_safe_completion, 2081 ceph_timeout_jiffies(req2->r_timeout)); 2082 if (ret) 2083 err = -EIO; 2084 ceph_mdsc_put_request(req2); 2085 } 2086 return err; 2087 } 2088 2089 int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync) 2090 { 2091 struct inode *inode = file->f_mapping->host; 2092 struct ceph_inode_info *ci = ceph_inode(inode); 2093 u64 flush_tid; 2094 int ret; 2095 int dirty; 2096 2097 dout("fsync %p%s\n", inode, datasync ? " datasync" : ""); 2098 2099 ret = filemap_write_and_wait_range(inode->i_mapping, start, end); 2100 if (ret < 0) 2101 goto out; 2102 2103 if (datasync) 2104 goto out; 2105 2106 inode_lock(inode); 2107 2108 dirty = try_flush_caps(inode, &flush_tid); 2109 dout("fsync dirty caps are %s\n", ceph_cap_string(dirty)); 2110 2111 ret = unsafe_request_wait(inode); 2112 2113 /* 2114 * only wait on non-file metadata writeback (the mds 2115 * can recover size and mtime, so we don't need to 2116 * wait for that) 2117 */ 2118 if (!ret && (dirty & ~CEPH_CAP_ANY_FILE_WR)) { 2119 ret = wait_event_interruptible(ci->i_cap_wq, 2120 caps_are_flushed(inode, flush_tid)); 2121 } 2122 inode_unlock(inode); 2123 out: 2124 dout("fsync %p%s result=%d\n", inode, datasync ? " datasync" : "", ret); 2125 return ret; 2126 } 2127 2128 /* 2129 * Flush any dirty caps back to the mds. If we aren't asked to wait, 2130 * queue inode for flush but don't do so immediately, because we can 2131 * get by with fewer MDS messages if we wait for data writeback to 2132 * complete first. 
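* (The requeued inode is revisited later by the delayed-cap worker, ceph_check_delayed_caps(), by which time writeback has usually finished.)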
2133 */ 2134 int ceph_write_inode(struct inode *inode, struct writeback_control *wbc) 2135 { 2136 struct ceph_inode_info *ci = ceph_inode(inode); 2137 u64 flush_tid; 2138 int err = 0; 2139 int dirty; 2140 int wait = wbc->sync_mode == WB_SYNC_ALL; 2141 2142 dout("write_inode %p wait=%d\n", inode, wait); 2143 if (wait) { 2144 dirty = try_flush_caps(inode, &flush_tid); 2145 if (dirty) 2146 err = wait_event_interruptible(ci->i_cap_wq, 2147 caps_are_flushed(inode, flush_tid)); 2148 } else { 2149 struct ceph_mds_client *mdsc = 2150 ceph_sb_to_client(inode->i_sb)->mdsc; 2151 2152 spin_lock(&ci->i_ceph_lock); 2153 if (__ceph_caps_dirty(ci)) 2154 __cap_delay_requeue_front(mdsc, ci); 2155 spin_unlock(&ci->i_ceph_lock); 2156 } 2157 return err; 2158 } 2159 2160 static void __kick_flushing_caps(struct ceph_mds_client *mdsc, 2161 struct ceph_mds_session *session, 2162 struct ceph_inode_info *ci, 2163 u64 oldest_flush_tid) 2164 __releases(ci->i_ceph_lock) 2165 __acquires(ci->i_ceph_lock) 2166 { 2167 struct inode *inode = &ci->vfs_inode; 2168 struct ceph_cap *cap; 2169 struct ceph_cap_flush *cf; 2170 int ret; 2171 u64 first_tid = 0; 2172 2173 list_for_each_entry(cf, &ci->i_cap_flush_list, i_list) { 2174 if (cf->tid < first_tid) 2175 continue; 2176 2177 cap = ci->i_auth_cap; 2178 if (!(cap && cap->session == session)) { 2179 pr_err("%p auth cap %p not mds%d ???\n", 2180 inode, cap, session->s_mds); 2181 break; 2182 } 2183 2184 first_tid = cf->tid + 1; 2185 2186 if (cf->caps) { 2187 dout("kick_flushing_caps %p cap %p tid %llu %s\n", 2188 inode, cap, cf->tid, ceph_cap_string(cf->caps)); 2189 ci->i_ceph_flags |= CEPH_I_NODELAY; 2190 ret = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, 2191 false, __ceph_caps_used(ci), 2192 __ceph_caps_wanted(ci), 2193 cap->issued | cap->implemented, 2194 cf->caps, cf->tid, oldest_flush_tid); 2195 if (ret) { 2196 pr_err("kick_flushing_caps: error sending " 2197 "cap flush, ino (%llx.%llx) " 2198 "tid %llu flushing %s\n", 2199 ceph_vinop(inode), cf->tid, 2200 ceph_cap_string(cf->caps)); 2201 } 2202 } else { 2203 struct ceph_cap_snap *capsnap = 2204 container_of(cf, struct ceph_cap_snap, 2205 cap_flush); 2206 dout("kick_flushing_caps %p capsnap %p tid %llu %s\n", 2207 inode, capsnap, cf->tid, 2208 ceph_cap_string(capsnap->dirty)); 2209 2210 refcount_inc(&capsnap->nref); 2211 spin_unlock(&ci->i_ceph_lock); 2212 2213 ret = __send_flush_snap(inode, session, capsnap, cap->mseq, 2214 oldest_flush_tid); 2215 if (ret < 0) { 2216 pr_err("kick_flushing_caps: error sending " 2217 "cap flushsnap, ino (%llx.%llx) " 2218 "tid %llu follows %llu\n", 2219 ceph_vinop(inode), cf->tid, 2220 capsnap->follows); 2221 } 2222 2223 ceph_put_cap_snap(capsnap); 2224 } 2225 2226 spin_lock(&ci->i_ceph_lock); 2227 } 2228 } 2229 2230 void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc, 2231 struct ceph_mds_session *session) 2232 { 2233 struct ceph_inode_info *ci; 2234 struct ceph_cap *cap; 2235 u64 oldest_flush_tid; 2236 2237 dout("early_kick_flushing_caps mds%d\n", session->s_mds); 2238 2239 spin_lock(&mdsc->cap_dirty_lock); 2240 oldest_flush_tid = __get_oldest_flush_tid(mdsc); 2241 spin_unlock(&mdsc->cap_dirty_lock); 2242 2243 list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) { 2244 spin_lock(&ci->i_ceph_lock); 2245 cap = ci->i_auth_cap; 2246 if (!(cap && cap->session == session)) { 2247 pr_err("%p auth cap %p not mds%d ???\n", 2248 &ci->vfs_inode, cap, session->s_mds); 2249 spin_unlock(&ci->i_ceph_lock); 2250 continue; 2251 } 2252 2253 2254 /* 2255 * if flushing caps were revoked, we 
re-send the cap flush 2256 * in client reconnect stage. This guarantees the MDS processes 2257 * the cap flush message before issuing the flushing caps to 2258 * other clients. 2259 */ 2260 if ((cap->issued & ci->i_flushing_caps) != 2261 ci->i_flushing_caps) { 2262 ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH; 2263 __kick_flushing_caps(mdsc, session, ci, 2264 oldest_flush_tid); 2265 } else { 2266 ci->i_ceph_flags |= CEPH_I_KICK_FLUSH; 2267 } 2268 2269 spin_unlock(&ci->i_ceph_lock); 2270 } 2271 } 2272 2273 void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc, 2274 struct ceph_mds_session *session) 2275 { 2276 struct ceph_inode_info *ci; 2277 struct ceph_cap *cap; 2278 u64 oldest_flush_tid; 2279 2280 dout("kick_flushing_caps mds%d\n", session->s_mds); 2281 2282 spin_lock(&mdsc->cap_dirty_lock); 2283 oldest_flush_tid = __get_oldest_flush_tid(mdsc); 2284 spin_unlock(&mdsc->cap_dirty_lock); 2285 2286 list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) { 2287 spin_lock(&ci->i_ceph_lock); 2288 cap = ci->i_auth_cap; 2289 if (!(cap && cap->session == session)) { 2290 pr_err("%p auth cap %p not mds%d ???\n", 2291 &ci->vfs_inode, cap, session->s_mds); 2292 spin_unlock(&ci->i_ceph_lock); 2293 continue; 2294 } 2295 if (ci->i_ceph_flags & CEPH_I_KICK_FLUSH) { 2296 ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH; 2297 __kick_flushing_caps(mdsc, session, ci, 2298 oldest_flush_tid); 2299 } 2300 spin_unlock(&ci->i_ceph_lock); 2301 } 2302 } 2303 2304 static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc, 2305 struct ceph_mds_session *session, 2306 struct inode *inode) 2307 __releases(ci->i_ceph_lock) 2308 { 2309 struct ceph_inode_info *ci = ceph_inode(inode); 2310 struct ceph_cap *cap; 2311 2312 cap = ci->i_auth_cap; 2313 dout("kick_flushing_inode_caps %p flushing %s\n", inode, 2314 ceph_cap_string(ci->i_flushing_caps)); 2315 2316 if (!list_empty(&ci->i_cap_flush_list)) { 2317 u64 oldest_flush_tid; 2318 spin_lock(&mdsc->cap_dirty_lock); 2319 list_move_tail(&ci->i_flushing_item, 2320 &cap->session->s_cap_flushing); 2321 oldest_flush_tid = __get_oldest_flush_tid(mdsc); 2322 spin_unlock(&mdsc->cap_dirty_lock); 2323 2324 ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH; 2325 __kick_flushing_caps(mdsc, session, ci, oldest_flush_tid); 2326 spin_unlock(&ci->i_ceph_lock); 2327 } else { 2328 spin_unlock(&ci->i_ceph_lock); 2329 } 2330 } 2331 2332 2333 /* 2334 * Take references to capabilities we hold, so that we don't release 2335 * them to the MDS prematurely. 2336 * 2337 * Protected by i_ceph_lock. 2338 */ 2339 static void __take_cap_refs(struct ceph_inode_info *ci, int got, 2340 bool snap_rwsem_locked) 2341 { 2342 if (got & CEPH_CAP_PIN) 2343 ci->i_pin_ref++; 2344 if (got & CEPH_CAP_FILE_RD) 2345 ci->i_rd_ref++; 2346 if (got & CEPH_CAP_FILE_CACHE) 2347 ci->i_rdcache_ref++; 2348 if (got & CEPH_CAP_FILE_WR) { 2349 if (ci->i_wr_ref == 0 && !ci->i_head_snapc) { 2350 BUG_ON(!snap_rwsem_locked); 2351 ci->i_head_snapc = ceph_get_snap_context( 2352 ci->i_snap_realm->cached_context); 2353 } 2354 ci->i_wr_ref++; 2355 } 2356 if (got & CEPH_CAP_FILE_BUFFER) { 2357 if (ci->i_wb_ref == 0) 2358 ihold(&ci->vfs_inode); 2359 ci->i_wb_ref++; 2360 dout("__take_cap_refs %p wb %d -> %d (?)\n", 2361 &ci->vfs_inode, ci->i_wb_ref-1, ci->i_wb_ref); 2362 } 2363 } 2364 2365 /* 2366 * Try to grab cap references. Specify those refs we @want, and the 2367 * minimal set we @need. Also include the larger offset we are writing 2368 * to (when applicable), and check against max_size here as well.
2369 * Note that caller is responsible for ensuring max_size increases are 2370 * requested from the MDS. 2371 */ 2372 static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want, 2373 loff_t endoff, bool nonblock, int *got, int *err) 2374 { 2375 struct inode *inode = &ci->vfs_inode; 2376 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; 2377 int ret = 0; 2378 int have, implemented; 2379 int file_wanted; 2380 bool snap_rwsem_locked = false; 2381 2382 dout("get_cap_refs %p need %s want %s\n", inode, 2383 ceph_cap_string(need), ceph_cap_string(want)); 2384 2385 again: 2386 spin_lock(&ci->i_ceph_lock); 2387 2388 /* make sure file is actually open */ 2389 file_wanted = __ceph_caps_file_wanted(ci); 2390 if ((file_wanted & need) != need) { 2391 dout("try_get_cap_refs need %s file_wanted %s, EBADF\n", 2392 ceph_cap_string(need), ceph_cap_string(file_wanted)); 2393 *err = -EBADF; 2394 ret = 1; 2395 goto out_unlock; 2396 } 2397 2398 /* finish pending truncate */ 2399 while (ci->i_truncate_pending) { 2400 spin_unlock(&ci->i_ceph_lock); 2401 if (snap_rwsem_locked) { 2402 up_read(&mdsc->snap_rwsem); 2403 snap_rwsem_locked = false; 2404 } 2405 __ceph_do_pending_vmtruncate(inode); 2406 spin_lock(&ci->i_ceph_lock); 2407 } 2408 2409 have = __ceph_caps_issued(ci, &implemented); 2410 2411 if (have & need & CEPH_CAP_FILE_WR) { 2412 if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) { 2413 dout("get_cap_refs %p endoff %llu > maxsize %llu\n", 2414 inode, endoff, ci->i_max_size); 2415 if (endoff > ci->i_requested_max_size) { 2416 *err = -EAGAIN; 2417 ret = 1; 2418 } 2419 goto out_unlock; 2420 } 2421 /* 2422 * If a sync write is in progress, we must wait, so that we 2423 * can get a final snapshot value for size+mtime. 2424 */ 2425 if (__ceph_have_pending_cap_snap(ci)) { 2426 dout("get_cap_refs %p cap_snap_pending\n", inode); 2427 goto out_unlock; 2428 } 2429 } 2430 2431 if ((have & need) == need) { 2432 /* 2433 * Look at (implemented & ~have & not) so that we keep waiting 2434 * on transition from wanted -> needed caps. This is needed 2435 * for WRBUFFER|WR -> WR to avoid a new WR sync write from 2436 * going before a prior buffered writeback happens. 
2437 */ 2438 int not = want & ~(have & need); 2439 int revoking = implemented & ~have; 2440 dout("get_cap_refs %p have %s but not %s (revoking %s)\n", 2441 inode, ceph_cap_string(have), ceph_cap_string(not), 2442 ceph_cap_string(revoking)); 2443 if ((revoking & not) == 0) { 2444 if (!snap_rwsem_locked && 2445 !ci->i_head_snapc && 2446 (need & CEPH_CAP_FILE_WR)) { 2447 if (!down_read_trylock(&mdsc->snap_rwsem)) { 2448 /* 2449 * we can not call down_read() when 2450 * task isn't in TASK_RUNNING state 2451 */ 2452 if (nonblock) { 2453 *err = -EAGAIN; 2454 ret = 1; 2455 goto out_unlock; 2456 } 2457 2458 spin_unlock(&ci->i_ceph_lock); 2459 down_read(&mdsc->snap_rwsem); 2460 snap_rwsem_locked = true; 2461 goto again; 2462 } 2463 snap_rwsem_locked = true; 2464 } 2465 *got = need | (have & want); 2466 if ((need & CEPH_CAP_FILE_RD) && 2467 !(*got & CEPH_CAP_FILE_CACHE)) 2468 ceph_disable_fscache_readpage(ci); 2469 __take_cap_refs(ci, *got, true); 2470 ret = 1; 2471 } 2472 } else { 2473 int session_readonly = false; 2474 if ((need & CEPH_CAP_FILE_WR) && ci->i_auth_cap) { 2475 struct ceph_mds_session *s = ci->i_auth_cap->session; 2476 spin_lock(&s->s_cap_lock); 2477 session_readonly = s->s_readonly; 2478 spin_unlock(&s->s_cap_lock); 2479 } 2480 if (session_readonly) { 2481 dout("get_cap_refs %p needed %s but mds%d readonly\n", 2482 inode, ceph_cap_string(need), ci->i_auth_cap->mds); 2483 *err = -EROFS; 2484 ret = 1; 2485 goto out_unlock; 2486 } 2487 2488 if (ci->i_ceph_flags & CEPH_I_CAP_DROPPED) { 2489 int mds_wanted; 2490 if (READ_ONCE(mdsc->fsc->mount_state) == 2491 CEPH_MOUNT_SHUTDOWN) { 2492 dout("get_cap_refs %p forced umount\n", inode); 2493 *err = -EIO; 2494 ret = 1; 2495 goto out_unlock; 2496 } 2497 mds_wanted = __ceph_caps_mds_wanted(ci, false); 2498 if (need & ~(mds_wanted & need)) { 2499 dout("get_cap_refs %p caps were dropped" 2500 " (session killed?)\n", inode); 2501 *err = -ESTALE; 2502 ret = 1; 2503 goto out_unlock; 2504 } 2505 if (!(file_wanted & ~mds_wanted)) 2506 ci->i_ceph_flags &= ~CEPH_I_CAP_DROPPED; 2507 } 2508 2509 dout("get_cap_refs %p have %s needed %s\n", inode, 2510 ceph_cap_string(have), ceph_cap_string(need)); 2511 } 2512 out_unlock: 2513 spin_unlock(&ci->i_ceph_lock); 2514 if (snap_rwsem_locked) 2515 up_read(&mdsc->snap_rwsem); 2516 2517 dout("get_cap_refs %p ret %d got %s\n", inode, 2518 ret, ceph_cap_string(*got)); 2519 return ret; 2520 } 2521 2522 /* 2523 * Check the offset we are writing up to against our current 2524 * max_size. If necessary, tell the MDS we want to write to 2525 * a larger offset. 2526 */ 2527 static void check_max_size(struct inode *inode, loff_t endoff) 2528 { 2529 struct ceph_inode_info *ci = ceph_inode(inode); 2530 int check = 0; 2531 2532 /* do we need to explicitly request a larger max_size? 
*/ 2533 spin_lock(&ci->i_ceph_lock); 2534 if (endoff >= ci->i_max_size && endoff > ci->i_wanted_max_size) { 2535 dout("write %p at large endoff %llu, req max_size\n", 2536 inode, endoff); 2537 ci->i_wanted_max_size = endoff; 2538 } 2539 /* duplicate ceph_check_caps()'s logic */ 2540 if (ci->i_auth_cap && 2541 (ci->i_auth_cap->issued & CEPH_CAP_FILE_WR) && 2542 ci->i_wanted_max_size > ci->i_max_size && 2543 ci->i_wanted_max_size > ci->i_requested_max_size) 2544 check = 1; 2545 spin_unlock(&ci->i_ceph_lock); 2546 if (check) 2547 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); 2548 } 2549 2550 int ceph_try_get_caps(struct ceph_inode_info *ci, int need, int want, int *got) 2551 { 2552 int ret, err = 0; 2553 2554 BUG_ON(need & ~CEPH_CAP_FILE_RD); 2555 BUG_ON(want & ~(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)); 2556 ret = ceph_pool_perm_check(ci, need); 2557 if (ret < 0) 2558 return ret; 2559 2560 ret = try_get_cap_refs(ci, need, want, 0, true, got, &err); 2561 if (ret) { 2562 if (err == -EAGAIN) { 2563 ret = 0; 2564 } else if (err < 0) { 2565 ret = err; 2566 } 2567 } 2568 return ret; 2569 } 2570 2571 /* 2572 * Wait for caps, and take cap references. If we can't get a WR cap 2573 * due to a small max_size, make sure we check_max_size (and possibly 2574 * ask the mds) so we don't get hung up indefinitely. 2575 */ 2576 int ceph_get_caps(struct ceph_inode_info *ci, int need, int want, 2577 loff_t endoff, int *got, struct page **pinned_page) 2578 { 2579 int _got, ret, err = 0; 2580 2581 ret = ceph_pool_perm_check(ci, need); 2582 if (ret < 0) 2583 return ret; 2584 2585 while (true) { 2586 if (endoff > 0) 2587 check_max_size(&ci->vfs_inode, endoff); 2588 2589 err = 0; 2590 _got = 0; 2591 ret = try_get_cap_refs(ci, need, want, endoff, 2592 false, &_got, &err); 2593 if (ret) { 2594 if (err == -EAGAIN) 2595 continue; 2596 if (err < 0) 2597 ret = err; 2598 } else { 2599 DEFINE_WAIT_FUNC(wait, woken_wake_function); 2600 add_wait_queue(&ci->i_cap_wq, &wait); 2601 2602 while (!try_get_cap_refs(ci, need, want, endoff, 2603 true, &_got, &err)) { 2604 if (signal_pending(current)) { 2605 ret = -ERESTARTSYS; 2606 break; 2607 } 2608 wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); 2609 } 2610 2611 remove_wait_queue(&ci->i_cap_wq, &wait); 2612 2613 if (err == -EAGAIN) 2614 continue; 2615 if (err < 0) 2616 ret = err; 2617 } 2618 if (ret < 0) { 2619 if (err == -ESTALE) { 2620 /* session was killed, try renew caps */ 2621 ret = ceph_renew_caps(&ci->vfs_inode); 2622 if (ret == 0) 2623 continue; 2624 } 2625 return ret; 2626 } 2627 2628 if (ci->i_inline_version != CEPH_INLINE_NONE && 2629 (_got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) && 2630 i_size_read(&ci->vfs_inode) > 0) { 2631 struct page *page = 2632 find_get_page(ci->vfs_inode.i_mapping, 0); 2633 if (page) { 2634 if (PageUptodate(page)) { 2635 *pinned_page = page; 2636 break; 2637 } 2638 put_page(page); 2639 } 2640 /* 2641 * drop cap refs first because getattr while 2642 * holding caps refs can cause deadlock. 2643 */ 2644 ceph_put_cap_refs(ci, _got); 2645 _got = 0; 2646 2647 /* 2648 * getattr request will bring inline data into 2649 * page cache 2650 */ 2651 ret = __ceph_do_getattr(&ci->vfs_inode, NULL, 2652 CEPH_STAT_CAP_INLINE_DATA, 2653 true); 2654 if (ret < 0) 2655 return ret; 2656 continue; 2657 } 2658 break; 2659 } 2660 2661 if ((_got & CEPH_CAP_FILE_RD) && (_got & CEPH_CAP_FILE_CACHE)) 2662 ceph_fscache_revalidate_cookie(ci); 2663 2664 *got = _got; 2665 return 0; 2666 } 2667 2668 /* 2669 * Take cap refs.
Caller must already know we hold at least one ref 2670 * on the caps in question or we don't know this is safe. 2671 */ 2672 void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps) 2673 { 2674 spin_lock(&ci->i_ceph_lock); 2675 __take_cap_refs(ci, caps, false); 2676 spin_unlock(&ci->i_ceph_lock); 2677 } 2678 2679 2680 /* 2681 * drop cap_snap that is not associated with any snapshot. 2682 * we don't need to send FLUSHSNAP message for it. 2683 */ 2684 static int ceph_try_drop_cap_snap(struct ceph_inode_info *ci, 2685 struct ceph_cap_snap *capsnap) 2686 { 2687 if (!capsnap->need_flush && 2688 !capsnap->writing && !capsnap->dirty_pages) { 2689 dout("dropping cap_snap %p follows %llu\n", 2690 capsnap, capsnap->follows); 2691 BUG_ON(capsnap->cap_flush.tid > 0); 2692 ceph_put_snap_context(capsnap->context); 2693 if (!list_is_last(&capsnap->ci_item, &ci->i_cap_snaps)) 2694 ci->i_ceph_flags |= CEPH_I_FLUSH_SNAPS; 2695 2696 list_del(&capsnap->ci_item); 2697 ceph_put_cap_snap(capsnap); 2698 return 1; 2699 } 2700 return 0; 2701 } 2702 2703 /* 2704 * Release cap refs. 2705 * 2706 * If we released the last ref on any given cap, call ceph_check_caps 2707 * to release (or schedule a release). 2708 * 2709 * If we are releasing a WR cap (from a sync write), finalize any affected 2710 * cap_snap, and wake up any waiters. 2711 */ 2712 void ceph_put_cap_refs(struct ceph_inode_info *ci, int had) 2713 { 2714 struct inode *inode = &ci->vfs_inode; 2715 int last = 0, put = 0, flushsnaps = 0, wake = 0; 2716 2717 spin_lock(&ci->i_ceph_lock); 2718 if (had & CEPH_CAP_PIN) 2719 --ci->i_pin_ref; 2720 if (had & CEPH_CAP_FILE_RD) 2721 if (--ci->i_rd_ref == 0) 2722 last++; 2723 if (had & CEPH_CAP_FILE_CACHE) 2724 if (--ci->i_rdcache_ref == 0) 2725 last++; 2726 if (had & CEPH_CAP_FILE_BUFFER) { 2727 if (--ci->i_wb_ref == 0) { 2728 last++; 2729 put++; 2730 } 2731 dout("put_cap_refs %p wb %d -> %d (?)\n", 2732 inode, ci->i_wb_ref+1, ci->i_wb_ref); 2733 } 2734 if (had & CEPH_CAP_FILE_WR) 2735 if (--ci->i_wr_ref == 0) { 2736 last++; 2737 if (__ceph_have_pending_cap_snap(ci)) { 2738 struct ceph_cap_snap *capsnap = 2739 list_last_entry(&ci->i_cap_snaps, 2740 struct ceph_cap_snap, 2741 ci_item); 2742 capsnap->writing = 0; 2743 if (ceph_try_drop_cap_snap(ci, capsnap)) 2744 put++; 2745 else if (__ceph_finish_cap_snap(ci, capsnap)) 2746 flushsnaps = 1; 2747 wake = 1; 2748 } 2749 if (ci->i_wrbuffer_ref_head == 0 && 2750 ci->i_dirty_caps == 0 && 2751 ci->i_flushing_caps == 0) { 2752 BUG_ON(!ci->i_head_snapc); 2753 ceph_put_snap_context(ci->i_head_snapc); 2754 ci->i_head_snapc = NULL; 2755 } 2756 /* see comment in __ceph_remove_cap() */ 2757 if (!__ceph_is_any_caps(ci) && ci->i_snap_realm) 2758 drop_inode_snap_realm(ci); 2759 } 2760 spin_unlock(&ci->i_ceph_lock); 2761 2762 dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had), 2763 last ? " last" : "", put ? " put" : ""); 2764 2765 if (last && !flushsnaps) 2766 ceph_check_caps(ci, 0, NULL); 2767 else if (flushsnaps) 2768 ceph_flush_snaps(ci, NULL); 2769 if (wake) 2770 wake_up_all(&ci->i_cap_wq); 2771 while (put-- > 0) 2772 iput(inode); 2773 } 2774 2775 /* 2776 * Release @nr WRBUFFER refs on dirty pages for the given @snapc snap 2777 * context. Adjust per-snap dirty page accounting as appropriate. 2778 * Once all dirty data for a cap_snap is flushed, flush snapped file 2779 * metadata back to the MDS. If we dropped the last ref, call 2780 * ceph_check_caps. 
2781 */ 2782 void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr, 2783 struct ceph_snap_context *snapc) 2784 { 2785 struct inode *inode = &ci->vfs_inode; 2786 struct ceph_cap_snap *capsnap = NULL; 2787 int put = 0; 2788 bool last = false; 2789 bool found = false; 2790 bool flush_snaps = false; 2791 bool complete_capsnap = false; 2792 2793 spin_lock(&ci->i_ceph_lock); 2794 ci->i_wrbuffer_ref -= nr; 2795 if (ci->i_wrbuffer_ref == 0) { 2796 last = true; 2797 put++; 2798 } 2799 2800 if (ci->i_head_snapc == snapc) { 2801 ci->i_wrbuffer_ref_head -= nr; 2802 if (ci->i_wrbuffer_ref_head == 0 && 2803 ci->i_wr_ref == 0 && 2804 ci->i_dirty_caps == 0 && 2805 ci->i_flushing_caps == 0) { 2806 BUG_ON(!ci->i_head_snapc); 2807 ceph_put_snap_context(ci->i_head_snapc); 2808 ci->i_head_snapc = NULL; 2809 } 2810 dout("put_wrbuffer_cap_refs on %p head %d/%d -> %d/%d %s\n", 2811 inode, 2812 ci->i_wrbuffer_ref+nr, ci->i_wrbuffer_ref_head+nr, 2813 ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head, 2814 last ? " LAST" : ""); 2815 } else { 2816 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { 2817 if (capsnap->context == snapc) { 2818 found = true; 2819 break; 2820 } 2821 } 2822 BUG_ON(!found); 2823 capsnap->dirty_pages -= nr; 2824 if (capsnap->dirty_pages == 0) { 2825 complete_capsnap = true; 2826 if (!capsnap->writing) { 2827 if (ceph_try_drop_cap_snap(ci, capsnap)) { 2828 put++; 2829 } else { 2830 ci->i_ceph_flags |= CEPH_I_FLUSH_SNAPS; 2831 flush_snaps = true; 2832 } 2833 } 2834 } 2835 dout("put_wrbuffer_cap_refs on %p cap_snap %p " 2836 " snap %lld %d/%d -> %d/%d %s%s\n", 2837 inode, capsnap, capsnap->context->seq, 2838 ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr, 2839 ci->i_wrbuffer_ref, capsnap->dirty_pages, 2840 last ? " (wrbuffer last)" : "", 2841 complete_capsnap ? " (complete capsnap)" : ""); 2842 } 2843 2844 spin_unlock(&ci->i_ceph_lock); 2845 2846 if (last) { 2847 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); 2848 } else if (flush_snaps) { 2849 ceph_flush_snaps(ci, NULL); 2850 } 2851 if (complete_capsnap) 2852 wake_up_all(&ci->i_cap_wq); 2853 while (put-- > 0) 2854 iput(inode); 2855 } 2856 2857 /* 2858 * Invalidate unlinked inode's aliases, so we can drop the inode ASAP. 2859 */ 2860 static void invalidate_aliases(struct inode *inode) 2861 { 2862 struct dentry *dn, *prev = NULL; 2863 2864 dout("invalidate_aliases inode %p\n", inode); 2865 d_prune_aliases(inode); 2866 /* 2867 * For non-directory inode, d_find_alias() only returns 2868 * hashed dentry. After calling d_invalidate(), the 2869 * dentry becomes unhashed. 2870 * 2871 * For directory inode, d_find_alias() can return 2872 * unhashed dentry. But directory inode should have 2873 * one alias at most. 2874 */ 2875 while ((dn = d_find_alias(inode))) { 2876 if (dn == prev) { 2877 dput(dn); 2878 break; 2879 } 2880 d_invalidate(dn); 2881 if (prev) 2882 dput(prev); 2883 prev = dn; 2884 } 2885 if (prev) 2886 dput(prev); 2887 } 2888 2889 /* 2890 * Handle a cap GRANT message from the MDS. (Note that a GRANT may 2891 * actually be a revocation if it specifies a smaller cap set.) 2892 * 2893 * caller holds s_mutex and i_ceph_lock, we drop both. 
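* On return the grant has been applied; writeback, invalidation, truncation and wakeups are queued rather than performed inline.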
2894 */ 2895 static void handle_cap_grant(struct ceph_mds_client *mdsc, 2896 struct inode *inode, struct ceph_mds_caps *grant, 2897 struct ceph_string **pns, u64 inline_version, 2898 void *inline_data, u32 inline_len, 2899 struct ceph_buffer *xattr_buf, 2900 struct ceph_mds_session *session, 2901 struct ceph_cap *cap, int issued) 2902 __releases(ci->i_ceph_lock) 2903 __releases(mdsc->snap_rwsem) 2904 { 2905 struct ceph_inode_info *ci = ceph_inode(inode); 2906 int mds = session->s_mds; 2907 int seq = le32_to_cpu(grant->seq); 2908 int newcaps = le32_to_cpu(grant->caps); 2909 int used, wanted, dirty; 2910 u64 size = le64_to_cpu(grant->size); 2911 u64 max_size = le64_to_cpu(grant->max_size); 2912 struct timespec mtime, atime, ctime; 2913 int check_caps = 0; 2914 bool wake = false; 2915 bool writeback = false; 2916 bool queue_trunc = false; 2917 bool queue_invalidate = false; 2918 bool deleted_inode = false; 2919 bool fill_inline = false; 2920 2921 dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n", 2922 inode, cap, mds, seq, ceph_cap_string(newcaps)); 2923 dout(" size %llu max_size %llu, i_size %llu\n", size, max_size, 2924 inode->i_size); 2925 2926 2927 /* 2928 * auth mds of the inode changed. we received the cap export message, 2929 * but still haven't received the cap import message. handle_cap_export 2930 * updated the new auth MDS' cap. 2931 * 2932 * "ceph_seq_cmp(seq, cap->seq) <= 0" means we are processing a message 2933 * that was sent before the cap import message. So don't remove caps. 2934 */ 2935 if (ceph_seq_cmp(seq, cap->seq) <= 0) { 2936 WARN_ON(cap != ci->i_auth_cap); 2937 WARN_ON(cap->cap_id != le64_to_cpu(grant->cap_id)); 2938 seq = cap->seq; 2939 newcaps |= cap->issued; 2940 } 2941 2942 /* 2943 * If CACHE is being revoked, and we have no dirty buffers, 2944 * try to invalidate (once). (If there are dirty buffers, we 2945 * will invalidate _after_ writeback.) 2946 */ 2947 if (!S_ISDIR(inode->i_mode) && /* don't invalidate readdir cache */ 2948 ((cap->issued & ~newcaps) & CEPH_CAP_FILE_CACHE) && 2949 (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 && 2950 !(ci->i_wrbuffer_ref || ci->i_wb_ref)) { 2951 if (try_nonblocking_invalidate(inode)) { 2952 /* there were locked pages.. invalidate later 2953 in a separate thread. 
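Recording i_rdcache_gen in i_rdcache_revoking lets the async invalidate detect whether new pages were cached while the revoke was pending.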
*/ 2954 if (ci->i_rdcache_revoking != ci->i_rdcache_gen) { 2955 queue_invalidate = true; 2956 ci->i_rdcache_revoking = ci->i_rdcache_gen; 2957 } 2958 } 2959 } 2960 2961 /* side effects now are allowed */ 2962 cap->cap_gen = session->s_cap_gen; 2963 cap->seq = seq; 2964 2965 __check_cap_issue(ci, cap, newcaps); 2966 2967 if ((newcaps & CEPH_CAP_AUTH_SHARED) && 2968 (issued & CEPH_CAP_AUTH_EXCL) == 0) { 2969 inode->i_mode = le32_to_cpu(grant->mode); 2970 inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(grant->uid)); 2971 inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(grant->gid)); 2972 dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode, 2973 from_kuid(&init_user_ns, inode->i_uid), 2974 from_kgid(&init_user_ns, inode->i_gid)); 2975 } 2976 2977 if ((newcaps & CEPH_CAP_AUTH_SHARED) && 2978 (issued & CEPH_CAP_LINK_EXCL) == 0) { 2979 set_nlink(inode, le32_to_cpu(grant->nlink)); 2980 if (inode->i_nlink == 0 && 2981 (newcaps & (CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL))) 2982 deleted_inode = true; 2983 } 2984 2985 if ((issued & CEPH_CAP_XATTR_EXCL) == 0 && grant->xattr_len) { 2986 int len = le32_to_cpu(grant->xattr_len); 2987 u64 version = le64_to_cpu(grant->xattr_version); 2988 2989 if (version > ci->i_xattrs.version) { 2990 dout(" got new xattrs v%llu on %p len %d\n", 2991 version, inode, len); 2992 if (ci->i_xattrs.blob) 2993 ceph_buffer_put(ci->i_xattrs.blob); 2994 ci->i_xattrs.blob = ceph_buffer_get(xattr_buf); 2995 ci->i_xattrs.version = version; 2996 ceph_forget_all_cached_acls(inode); 2997 } 2998 } 2999 3000 if (newcaps & CEPH_CAP_ANY_RD) { 3001 /* ctime/mtime/atime? */ 3002 ceph_decode_timespec(&mtime, &grant->mtime); 3003 ceph_decode_timespec(&atime, &grant->atime); 3004 ceph_decode_timespec(&ctime, &grant->ctime); 3005 ceph_fill_file_time(inode, issued, 3006 le32_to_cpu(grant->time_warp_seq), 3007 &ctime, &mtime, &atime); 3008 } 3009 3010 if (newcaps & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR)) { 3011 /* file layout may have changed */ 3012 s64 old_pool = ci->i_layout.pool_id; 3013 struct ceph_string *old_ns; 3014 3015 ceph_file_layout_from_legacy(&ci->i_layout, &grant->layout); 3016 old_ns = rcu_dereference_protected(ci->i_layout.pool_ns, 3017 lockdep_is_held(&ci->i_ceph_lock)); 3018 rcu_assign_pointer(ci->i_layout.pool_ns, *pns); 3019 3020 if (ci->i_layout.pool_id != old_pool || *pns != old_ns) 3021 ci->i_ceph_flags &= ~CEPH_I_POOL_PERM; 3022 3023 *pns = old_ns; 3024 3025 /* size/truncate_seq? */ 3026 queue_trunc = ceph_fill_file_size(inode, issued, 3027 le32_to_cpu(grant->truncate_seq), 3028 le64_to_cpu(grant->truncate_size), 3029 size); 3030 /* max size increase? 
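(only the auth cap carries max_size; once the new limit covers what we asked for, reset our wanted/requested values and wake anyone blocked in ceph_get_caps())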
*/ 3031 if (ci->i_auth_cap == cap && max_size != ci->i_max_size) { 3032 dout("max_size %lld -> %llu\n", 3033 ci->i_max_size, max_size); 3034 ci->i_max_size = max_size; 3035 if (max_size >= ci->i_wanted_max_size) { 3036 ci->i_wanted_max_size = 0; /* reset */ 3037 ci->i_requested_max_size = 0; 3038 } 3039 wake = true; 3040 } 3041 } 3042 3043 /* check cap bits */ 3044 wanted = __ceph_caps_wanted(ci); 3045 used = __ceph_caps_used(ci); 3046 dirty = __ceph_caps_dirty(ci); 3047 dout(" my wanted = %s, used = %s, dirty %s\n", 3048 ceph_cap_string(wanted), 3049 ceph_cap_string(used), 3050 ceph_cap_string(dirty)); 3051 if (wanted != le32_to_cpu(grant->wanted)) { 3052 dout("mds wanted %s -> %s\n", 3053 ceph_cap_string(le32_to_cpu(grant->wanted)), 3054 ceph_cap_string(wanted)); 3055 /* imported cap may not have correct mds_wanted */ 3056 if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) 3057 check_caps = 1; 3058 } 3059 3060 /* revocation, grant, or no-op? */ 3061 if (cap->issued & ~newcaps) { 3062 int revoking = cap->issued & ~newcaps; 3063 3064 dout("revocation: %s -> %s (revoking %s)\n", 3065 ceph_cap_string(cap->issued), 3066 ceph_cap_string(newcaps), 3067 ceph_cap_string(revoking)); 3068 if (revoking & used & CEPH_CAP_FILE_BUFFER) 3069 writeback = true; /* initiate writeback; will delay ack */ 3070 else if (revoking == CEPH_CAP_FILE_CACHE && 3071 (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 && 3072 queue_invalidate) 3073 ; /* do nothing yet, invalidation will be queued */ 3074 else if (cap == ci->i_auth_cap) 3075 check_caps = 1; /* check auth cap only */ 3076 else 3077 check_caps = 2; /* check all caps */ 3078 cap->issued = newcaps; 3079 cap->implemented |= newcaps; 3080 } else if (cap->issued == newcaps) { 3081 dout("caps unchanged: %s -> %s\n", 3082 ceph_cap_string(cap->issued), ceph_cap_string(newcaps)); 3083 } else { 3084 dout("grant: %s -> %s\n", ceph_cap_string(cap->issued), 3085 ceph_cap_string(newcaps)); 3086 /* non-auth MDS is revoking the newly granted caps? */ 3087 if (cap == ci->i_auth_cap && 3088 __ceph_caps_revoking_other(ci, cap, newcaps)) 3089 check_caps = 2; 3090 3091 cap->issued = newcaps; 3092 cap->implemented |= newcaps; /* add bits only, to 3093 * avoid stepping on a 3094 * pending revocation */ 3095 wake = true; 3096 } 3097 BUG_ON(cap->issued & ~cap->implemented); 3098 3099 if (inline_version > 0 && inline_version >= ci->i_inline_version) { 3100 ci->i_inline_version = inline_version; 3101 if (ci->i_inline_version != CEPH_INLINE_NONE && 3102 (newcaps & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO))) 3103 fill_inline = true; 3104 } 3105 3106 if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) { 3107 if (newcaps & ~issued) 3108 wake = true; 3109 kick_flushing_inode_caps(mdsc, session, inode); 3110 up_read(&mdsc->snap_rwsem); 3111 } else { 3112 spin_unlock(&ci->i_ceph_lock); 3113 } 3114 3115 if (fill_inline) 3116 ceph_fill_inline_data(inode, NULL, inline_data, inline_len); 3117 3118 if (queue_trunc) 3119 ceph_queue_vmtruncate(inode); 3120 3121 if (writeback) 3122 /* 3123 * queue inode for writeback: we can't actually call 3124 * filemap_write_and_wait, etc. from message handler 3125 * context.
3126 */ 3127 ceph_queue_writeback(inode); 3128 if (queue_invalidate) 3129 ceph_queue_invalidate(inode); 3130 if (deleted_inode) 3131 invalidate_aliases(inode); 3132 if (wake) 3133 wake_up_all(&ci->i_cap_wq); 3134 3135 if (check_caps == 1) 3136 ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY, 3137 session); 3138 else if (check_caps == 2) 3139 ceph_check_caps(ci, CHECK_CAPS_NODELAY, session); 3140 else 3141 mutex_unlock(&session->s_mutex); 3142 } 3143 3144 /* 3145 * Handle FLUSH_ACK from MDS, indicating that metadata we sent to the 3146 * MDS has been safely committed. 3147 */ 3148 static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid, 3149 struct ceph_mds_caps *m, 3150 struct ceph_mds_session *session, 3151 struct ceph_cap *cap) 3152 __releases(ci->i_ceph_lock) 3153 { 3154 struct ceph_inode_info *ci = ceph_inode(inode); 3155 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; 3156 struct ceph_cap_flush *cf, *tmp_cf; 3157 LIST_HEAD(to_remove); 3158 unsigned seq = le32_to_cpu(m->seq); 3159 int dirty = le32_to_cpu(m->dirty); 3160 int cleaned = 0; 3161 bool drop = false; 3162 bool wake_ci = 0; 3163 bool wake_mdsc = 0; 3164 3165 list_for_each_entry_safe(cf, tmp_cf, &ci->i_cap_flush_list, i_list) { 3166 if (cf->tid == flush_tid) 3167 cleaned = cf->caps; 3168 if (cf->caps == 0) /* capsnap */ 3169 continue; 3170 if (cf->tid <= flush_tid) { 3171 if (__finish_cap_flush(NULL, ci, cf)) 3172 wake_ci = true; 3173 list_add_tail(&cf->i_list, &to_remove); 3174 } else { 3175 cleaned &= ~cf->caps; 3176 if (!cleaned) 3177 break; 3178 } 3179 } 3180 3181 dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s," 3182 " flushing %s -> %s\n", 3183 inode, session->s_mds, seq, ceph_cap_string(dirty), 3184 ceph_cap_string(cleaned), ceph_cap_string(ci->i_flushing_caps), 3185 ceph_cap_string(ci->i_flushing_caps & ~cleaned)); 3186 3187 if (list_empty(&to_remove) && !cleaned) 3188 goto out; 3189 3190 ci->i_flushing_caps &= ~cleaned; 3191 3192 spin_lock(&mdsc->cap_dirty_lock); 3193 3194 list_for_each_entry(cf, &to_remove, i_list) { 3195 if (__finish_cap_flush(mdsc, NULL, cf)) 3196 wake_mdsc = true; 3197 } 3198 3199 if (ci->i_flushing_caps == 0) { 3200 if (list_empty(&ci->i_cap_flush_list)) { 3201 list_del_init(&ci->i_flushing_item); 3202 if (!list_empty(&session->s_cap_flushing)) { 3203 dout(" mds%d still flushing cap on %p\n", 3204 session->s_mds, 3205 &list_first_entry(&session->s_cap_flushing, 3206 struct ceph_inode_info, 3207 i_flushing_item)->vfs_inode); 3208 } 3209 } 3210 mdsc->num_cap_flushing--; 3211 dout(" inode %p now !flushing\n", inode); 3212 3213 if (ci->i_dirty_caps == 0) { 3214 dout(" inode %p now clean\n", inode); 3215 BUG_ON(!list_empty(&ci->i_dirty_item)); 3216 drop = true; 3217 if (ci->i_wr_ref == 0 && 3218 ci->i_wrbuffer_ref_head == 0) { 3219 BUG_ON(!ci->i_head_snapc); 3220 ceph_put_snap_context(ci->i_head_snapc); 3221 ci->i_head_snapc = NULL; 3222 } 3223 } else { 3224 BUG_ON(list_empty(&ci->i_dirty_item)); 3225 } 3226 } 3227 spin_unlock(&mdsc->cap_dirty_lock); 3228 3229 out: 3230 spin_unlock(&ci->i_ceph_lock); 3231 3232 while (!list_empty(&to_remove)) { 3233 cf = list_first_entry(&to_remove, 3234 struct ceph_cap_flush, i_list); 3235 list_del(&cf->i_list); 3236 ceph_free_cap_flush(cf); 3237 } 3238 3239 if (wake_ci) 3240 wake_up_all(&ci->i_cap_wq); 3241 if (wake_mdsc) 3242 wake_up_all(&mdsc->cap_flushing_wq); 3243 if (drop) 3244 iput(inode); 3245 } 3246 3247 /* 3248 * Handle FLUSHSNAP_ACK. 
MDS has flushed snap data to disk and we can 3249 * throw away our cap_snap. 3250 * 3251 * Caller holds s_mutex. 3252 */ 3253 static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid, 3254 struct ceph_mds_caps *m, 3255 struct ceph_mds_session *session) 3256 { 3257 struct ceph_inode_info *ci = ceph_inode(inode); 3258 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; 3259 u64 follows = le64_to_cpu(m->snap_follows); 3260 struct ceph_cap_snap *capsnap; 3261 bool flushed = false; 3262 bool wake_ci = false; 3263 bool wake_mdsc = false; 3264 3265 dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n", 3266 inode, ci, session->s_mds, follows); 3267 3268 spin_lock(&ci->i_ceph_lock); 3269 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) { 3270 if (capsnap->follows == follows) { 3271 if (capsnap->cap_flush.tid != flush_tid) { 3272 dout(" cap_snap %p follows %lld tid %lld !=" 3273 " %lld\n", capsnap, follows, 3274 flush_tid, capsnap->cap_flush.tid); 3275 break; 3276 } 3277 flushed = true; 3278 break; 3279 } else { 3280 dout(" skipping cap_snap %p follows %lld\n", 3281 capsnap, capsnap->follows); 3282 } 3283 } 3284 if (flushed) { 3285 WARN_ON(capsnap->dirty_pages || capsnap->writing); 3286 dout(" removing %p cap_snap %p follows %lld\n", 3287 inode, capsnap, follows); 3288 list_del(&capsnap->ci_item); 3289 if (__finish_cap_flush(NULL, ci, &capsnap->cap_flush)) 3290 wake_ci = true; 3291 3292 spin_lock(&mdsc->cap_dirty_lock); 3293 3294 if (list_empty(&ci->i_cap_flush_list)) 3295 list_del_init(&ci->i_flushing_item); 3296 3297 if (__finish_cap_flush(mdsc, NULL, &capsnap->cap_flush)) 3298 wake_mdsc = true; 3299 3300 spin_unlock(&mdsc->cap_dirty_lock); 3301 } 3302 spin_unlock(&ci->i_ceph_lock); 3303 if (flushed) { 3304 ceph_put_snap_context(capsnap->context); 3305 ceph_put_cap_snap(capsnap); 3306 if (wake_ci) 3307 wake_up_all(&ci->i_cap_wq); 3308 if (wake_mdsc) 3309 wake_up_all(&mdsc->cap_flushing_wq); 3310 iput(inode); 3311 } 3312 } 3313 3314 /* 3315 * Handle TRUNC from MDS, indicating file truncation. 3316 * 3317 * caller holds s_mutex. 3318 */ 3319 static void handle_cap_trunc(struct inode *inode, 3320 struct ceph_mds_caps *trunc, 3321 struct ceph_mds_session *session) 3322 __releases(ci->i_ceph_lock) 3323 { 3324 struct ceph_inode_info *ci = ceph_inode(inode); 3325 int mds = session->s_mds; 3326 int seq = le32_to_cpu(trunc->seq); 3327 u32 truncate_seq = le32_to_cpu(trunc->truncate_seq); 3328 u64 truncate_size = le64_to_cpu(trunc->truncate_size); 3329 u64 size = le64_to_cpu(trunc->size); 3330 int implemented = 0; 3331 int dirty = __ceph_caps_dirty(ci); 3332 int issued = __ceph_caps_issued(ceph_inode(inode), &implemented); 3333 int queue_trunc = 0; 3334 3335 issued |= implemented | dirty; 3336 3337 dout("handle_cap_trunc inode %p mds%d seq %d to %lld seq %d\n", 3338 inode, mds, seq, truncate_size, truncate_seq); 3339 queue_trunc = ceph_fill_file_size(inode, issued, 3340 truncate_seq, truncate_size, size); 3341 spin_unlock(&ci->i_ceph_lock); 3342 3343 if (queue_trunc) 3344 ceph_queue_vmtruncate(inode); 3345 } 3346 3347 /* 3348 * Handle EXPORT from MDS. Cap is being migrated _from_ this mds to a 3349 * different one. If we are the most recent migration we've seen (as 3350 * indicated by mseq), make note of the migrating cap bits for the 3351 * duration (until we see the corresponding IMPORT).
3352 * 3353 * caller holds s_mutex 3354 */ 3355 static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex, 3356 struct ceph_mds_cap_peer *ph, 3357 struct ceph_mds_session *session) 3358 { 3359 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; 3360 struct ceph_mds_session *tsession = NULL; 3361 struct ceph_cap *cap, *tcap, *new_cap = NULL; 3362 struct ceph_inode_info *ci = ceph_inode(inode); 3363 u64 t_cap_id; 3364 unsigned mseq = le32_to_cpu(ex->migrate_seq); 3365 unsigned t_seq, t_mseq; 3366 int target, issued; 3367 int mds = session->s_mds; 3368 3369 if (ph) { 3370 t_cap_id = le64_to_cpu(ph->cap_id); 3371 t_seq = le32_to_cpu(ph->seq); 3372 t_mseq = le32_to_cpu(ph->mseq); 3373 target = le32_to_cpu(ph->mds); 3374 } else { 3375 t_cap_id = t_seq = t_mseq = 0; 3376 target = -1; 3377 } 3378 3379 dout("handle_cap_export inode %p ci %p mds%d mseq %d target %d\n", 3380 inode, ci, mds, mseq, target); 3381 retry: 3382 spin_lock(&ci->i_ceph_lock); 3383 cap = __get_cap_for_mds(ci, mds); 3384 if (!cap || cap->cap_id != le64_to_cpu(ex->cap_id)) 3385 goto out_unlock; 3386 3387 if (target < 0) { 3388 __ceph_remove_cap(cap, false); 3389 if (!ci->i_auth_cap) 3390 ci->i_ceph_flags |= CEPH_I_CAP_DROPPED; 3391 goto out_unlock; 3392 } 3393 3394 /* 3395 * now we know we haven't received the cap import message yet 3396 * because the exported cap still exist. 3397 */ 3398 3399 issued = cap->issued; 3400 WARN_ON(issued != cap->implemented); 3401 3402 tcap = __get_cap_for_mds(ci, target); 3403 if (tcap) { 3404 /* already have caps from the target */ 3405 if (tcap->cap_id != t_cap_id || 3406 ceph_seq_cmp(tcap->seq, t_seq) < 0) { 3407 dout(" updating import cap %p mds%d\n", tcap, target); 3408 tcap->cap_id = t_cap_id; 3409 tcap->seq = t_seq - 1; 3410 tcap->issue_seq = t_seq - 1; 3411 tcap->mseq = t_mseq; 3412 tcap->issued |= issued; 3413 tcap->implemented |= issued; 3414 if (cap == ci->i_auth_cap) 3415 ci->i_auth_cap = tcap; 3416 3417 if (!list_empty(&ci->i_cap_flush_list) && 3418 ci->i_auth_cap == tcap) { 3419 spin_lock(&mdsc->cap_dirty_lock); 3420 list_move_tail(&ci->i_flushing_item, 3421 &tcap->session->s_cap_flushing); 3422 spin_unlock(&mdsc->cap_dirty_lock); 3423 } 3424 } 3425 __ceph_remove_cap(cap, false); 3426 goto out_unlock; 3427 } else if (tsession) { 3428 /* add placeholder for the export tagert */ 3429 int flag = (cap == ci->i_auth_cap) ? 
CEPH_CAP_FLAG_AUTH : 0; 3430 tcap = new_cap; 3431 ceph_add_cap(inode, tsession, t_cap_id, -1, issued, 0, 3432 t_seq - 1, t_mseq, (u64)-1, flag, &new_cap); 3433 3434 if (!list_empty(&ci->i_cap_flush_list) && 3435 ci->i_auth_cap == tcap) { 3436 spin_lock(&mdsc->cap_dirty_lock); 3437 list_move_tail(&ci->i_flushing_item, 3438 &tcap->session->s_cap_flushing); 3439 spin_unlock(&mdsc->cap_dirty_lock); 3440 } 3441 3442 __ceph_remove_cap(cap, false); 3443 goto out_unlock; 3444 } 3445 3446 spin_unlock(&ci->i_ceph_lock); 3447 mutex_unlock(&session->s_mutex); 3448 3449 /* open target session */ 3450 tsession = ceph_mdsc_open_export_target_session(mdsc, target); 3451 if (!IS_ERR(tsession)) { 3452 if (mds > target) { 3453 mutex_lock(&session->s_mutex); 3454 mutex_lock_nested(&tsession->s_mutex, 3455 SINGLE_DEPTH_NESTING); 3456 } else { 3457 mutex_lock(&tsession->s_mutex); 3458 mutex_lock_nested(&session->s_mutex, 3459 SINGLE_DEPTH_NESTING); 3460 } 3461 new_cap = ceph_get_cap(mdsc, NULL); 3462 } else { 3463 WARN_ON(1); 3464 tsession = NULL; 3465 target = -1; 3466 } 3467 goto retry; 3468 3469 out_unlock: 3470 spin_unlock(&ci->i_ceph_lock); 3471 mutex_unlock(&session->s_mutex); 3472 if (tsession) { 3473 mutex_unlock(&tsession->s_mutex); 3474 ceph_put_mds_session(tsession); 3475 } 3476 if (new_cap) 3477 ceph_put_cap(mdsc, new_cap); 3478 } 3479 3480 /* 3481 * Handle cap IMPORT. 3482 * 3483 * caller holds s_mutex. acquires i_ceph_lock 3484 */ 3485 static void handle_cap_import(struct ceph_mds_client *mdsc, 3486 struct inode *inode, struct ceph_mds_caps *im, 3487 struct ceph_mds_cap_peer *ph, 3488 struct ceph_mds_session *session, 3489 struct ceph_cap **target_cap, int *old_issued) 3490 __acquires(ci->i_ceph_lock) 3491 { 3492 struct ceph_inode_info *ci = ceph_inode(inode); 3493 struct ceph_cap *cap, *ocap, *new_cap = NULL; 3494 int mds = session->s_mds; 3495 int issued; 3496 unsigned caps = le32_to_cpu(im->caps); 3497 unsigned wanted = le32_to_cpu(im->wanted); 3498 unsigned seq = le32_to_cpu(im->seq); 3499 unsigned mseq = le32_to_cpu(im->migrate_seq); 3500 u64 realmino = le64_to_cpu(im->realm); 3501 u64 cap_id = le64_to_cpu(im->cap_id); 3502 u64 p_cap_id; 3503 int peer; 3504 3505 if (ph) { 3506 p_cap_id = le64_to_cpu(ph->cap_id); 3507 peer = le32_to_cpu(ph->mds); 3508 } else { 3509 p_cap_id = 0; 3510 peer = -1; 3511 } 3512 3513 dout("handle_cap_import inode %p ci %p mds%d mseq %d peer %d\n", 3514 inode, ci, mds, mseq, peer); 3515 3516 retry: 3517 spin_lock(&ci->i_ceph_lock); 3518 cap = __get_cap_for_mds(ci, mds); 3519 if (!cap) { 3520 if (!new_cap) { 3521 spin_unlock(&ci->i_ceph_lock); 3522 new_cap = ceph_get_cap(mdsc, NULL); 3523 goto retry; 3524 } 3525 cap = new_cap; 3526 } else { 3527 if (new_cap) { 3528 ceph_put_cap(mdsc, new_cap); 3529 new_cap = NULL; 3530 } 3531 } 3532 3533 __ceph_caps_issued(ci, &issued); 3534 issued |= __ceph_caps_dirty(ci); 3535 3536 ceph_add_cap(inode, session, cap_id, -1, caps, wanted, seq, mseq, 3537 realmino, CEPH_CAP_FLAG_AUTH, &new_cap); 3538 3539 ocap = peer >= 0 ? 
__get_cap_for_mds(ci, peer) : NULL; 3540 if (ocap && ocap->cap_id == p_cap_id) { 3541 dout(" remove export cap %p mds%d flags %d\n", 3542 ocap, peer, ph->flags); 3543 if ((ph->flags & CEPH_CAP_FLAG_AUTH) && 3544 (ocap->seq != le32_to_cpu(ph->seq) || 3545 ocap->mseq != le32_to_cpu(ph->mseq))) { 3546 pr_err("handle_cap_import: mismatched seq/mseq: " 3547 "ino (%llx.%llx) mds%d seq %d mseq %d " 3548 "importer mds%d has peer seq %d mseq %d\n", 3549 ceph_vinop(inode), peer, ocap->seq, 3550 ocap->mseq, mds, le32_to_cpu(ph->seq), 3551 le32_to_cpu(ph->mseq)); 3552 } 3553 __ceph_remove_cap(ocap, (ph->flags & CEPH_CAP_FLAG_RELEASE)); 3554 } 3555 3556 /* make sure we re-request max_size, if necessary */ 3557 ci->i_wanted_max_size = 0; 3558 ci->i_requested_max_size = 0; 3559 3560 *old_issued = issued; 3561 *target_cap = cap; 3562 } 3563 3564 /* 3565 * Handle a caps message from the MDS. 3566 * 3567 * Identify the appropriate session, inode, and call the right handler 3568 * based on the cap op. 3569 */ 3570 void ceph_handle_caps(struct ceph_mds_session *session, 3571 struct ceph_msg *msg) 3572 { 3573 struct ceph_mds_client *mdsc = session->s_mdsc; 3574 struct super_block *sb = mdsc->fsc->sb; 3575 struct inode *inode; 3576 struct ceph_inode_info *ci; 3577 struct ceph_cap *cap; 3578 struct ceph_mds_caps *h; 3579 struct ceph_mds_cap_peer *peer = NULL; 3580 struct ceph_snap_realm *realm = NULL; 3581 struct ceph_string *pool_ns = NULL; 3582 int mds = session->s_mds; 3583 int op, issued; 3584 u32 seq, mseq; 3585 struct ceph_vino vino; 3586 u64 tid; 3587 u64 inline_version = 0; 3588 void *inline_data = NULL; 3589 u32 inline_len = 0; 3590 void *snaptrace; 3591 size_t snaptrace_len; 3592 void *p, *end; 3593 3594 dout("handle_caps from mds%d\n", mds); 3595 3596 /* decode */ 3597 end = msg->front.iov_base + msg->front.iov_len; 3598 tid = le64_to_cpu(msg->hdr.tid); 3599 if (msg->front.iov_len < sizeof(*h)) 3600 goto bad; 3601 h = msg->front.iov_base; 3602 op = le32_to_cpu(h->op); 3603 vino.ino = le64_to_cpu(h->ino); 3604 vino.snap = CEPH_NOSNAP; 3605 seq = le32_to_cpu(h->seq); 3606 mseq = le32_to_cpu(h->migrate_seq); 3607 3608 snaptrace = h + 1; 3609 snaptrace_len = le32_to_cpu(h->snap_trace_len); 3610 p = snaptrace + snaptrace_len; 3611 3612 if (le16_to_cpu(msg->hdr.version) >= 2) { 3613 u32 flock_len; 3614 ceph_decode_32_safe(&p, end, flock_len, bad); 3615 if (p + flock_len > end) 3616 goto bad; 3617 p += flock_len; 3618 } 3619 3620 if (le16_to_cpu(msg->hdr.version) >= 3) { 3621 if (op == CEPH_CAP_OP_IMPORT) { 3622 if (p + sizeof(*peer) > end) 3623 goto bad; 3624 peer = p; 3625 p += sizeof(*peer); 3626 } else if (op == CEPH_CAP_OP_EXPORT) { 3627 /* recorded in unused fields */ 3628 peer = (void *)&h->size; 3629 } 3630 } 3631 3632 if (le16_to_cpu(msg->hdr.version) >= 4) { 3633 ceph_decode_64_safe(&p, end, inline_version, bad); 3634 ceph_decode_32_safe(&p, end, inline_len, bad); 3635 if (p + inline_len > end) 3636 goto bad; 3637 inline_data = p; 3638 p += inline_len; 3639 } 3640 3641 if (le16_to_cpu(msg->hdr.version) >= 5) { 3642 struct ceph_osd_client *osdc = &mdsc->fsc->client->osdc; 3643 u32 epoch_barrier; 3644 3645 ceph_decode_32_safe(&p, end, epoch_barrier, bad); 3646 ceph_osdc_update_epoch_barrier(osdc, epoch_barrier); 3647 } 3648 3649 if (le16_to_cpu(msg->hdr.version) >= 8) { 3650 u64 flush_tid; 3651 u32 caller_uid, caller_gid; 3652 u32 pool_ns_len; 3653 3654 /* version >= 6 */ 3655 ceph_decode_64_safe(&p, end, flush_tid, bad); 3656 /* version >= 7 */ 3657 ceph_decode_32_safe(&p, end, caller_uid, bad); 
3658 ceph_decode_32_safe(&p, end, caller_gid, bad); 3659 /* version >= 8 */ 3660 ceph_decode_32_safe(&p, end, pool_ns_len, bad); 3661 if (pool_ns_len > 0) { 3662 ceph_decode_need(&p, end, pool_ns_len, bad); 3663 pool_ns = ceph_find_or_create_string(p, pool_ns_len); 3664 p += pool_ns_len; 3665 } 3666 } 3667 3668 /* lookup ino */ 3669 inode = ceph_find_inode(sb, vino); 3670 ci = ceph_inode(inode); 3671 dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino, 3672 vino.snap, inode); 3673 3674 mutex_lock(&session->s_mutex); 3675 session->s_seq++; 3676 dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq, 3677 (unsigned)seq); 3678 3679 if (!inode) { 3680 dout(" i don't have ino %llx\n", vino.ino); 3681 3682 if (op == CEPH_CAP_OP_IMPORT) { 3683 cap = ceph_get_cap(mdsc, NULL); 3684 cap->cap_ino = vino.ino; 3685 cap->queue_release = 1; 3686 cap->cap_id = le64_to_cpu(h->cap_id); 3687 cap->mseq = mseq; 3688 cap->seq = seq; 3689 cap->issue_seq = seq; 3690 spin_lock(&session->s_cap_lock); 3691 list_add_tail(&cap->session_caps, 3692 &session->s_cap_releases); 3693 session->s_num_cap_releases++; 3694 spin_unlock(&session->s_cap_lock); 3695 } 3696 goto flush_cap_releases; 3697 } 3698 3699 /* these will work even if we don't have a cap yet */ 3700 switch (op) { 3701 case CEPH_CAP_OP_FLUSHSNAP_ACK: 3702 handle_cap_flushsnap_ack(inode, tid, h, session); 3703 goto done; 3704 3705 case CEPH_CAP_OP_EXPORT: 3706 handle_cap_export(inode, h, peer, session); 3707 goto done_unlocked; 3708 3709 case CEPH_CAP_OP_IMPORT: 3710 realm = NULL; 3711 if (snaptrace_len) { 3712 down_write(&mdsc->snap_rwsem); 3713 ceph_update_snap_trace(mdsc, snaptrace, 3714 snaptrace + snaptrace_len, 3715 false, &realm); 3716 downgrade_write(&mdsc->snap_rwsem); 3717 } else { 3718 down_read(&mdsc->snap_rwsem); 3719 } 3720 handle_cap_import(mdsc, inode, h, peer, session, 3721 &cap, &issued); 3722 handle_cap_grant(mdsc, inode, h, &pool_ns, 3723 inline_version, inline_data, inline_len, 3724 msg->middle, session, cap, issued); 3725 if (realm) 3726 ceph_put_snap_realm(mdsc, realm); 3727 goto done_unlocked; 3728 } 3729 3730 /* the rest require a cap */ 3731 spin_lock(&ci->i_ceph_lock); 3732 cap = __get_cap_for_mds(ceph_inode(inode), mds); 3733 if (!cap) { 3734 dout(" no cap on %p ino %llx.%llx from mds%d\n", 3735 inode, ceph_ino(inode), ceph_snap(inode), mds); 3736 spin_unlock(&ci->i_ceph_lock); 3737 goto flush_cap_releases; 3738 } 3739 3740 /* note that each of these drops i_ceph_lock for us */ 3741 switch (op) { 3742 case CEPH_CAP_OP_REVOKE: 3743 case CEPH_CAP_OP_GRANT: 3744 __ceph_caps_issued(ci, &issued); 3745 issued |= __ceph_caps_dirty(ci); 3746 handle_cap_grant(mdsc, inode, h, &pool_ns, 3747 inline_version, inline_data, inline_len, 3748 msg->middle, session, cap, issued); 3749 goto done_unlocked; 3750 3751 case CEPH_CAP_OP_FLUSH_ACK: 3752 handle_cap_flush_ack(inode, tid, h, session, cap); 3753 break; 3754 3755 case CEPH_CAP_OP_TRUNC: 3756 handle_cap_trunc(inode, h, session); 3757 break; 3758 3759 default: 3760 spin_unlock(&ci->i_ceph_lock); 3761 pr_err("ceph_handle_caps: unknown cap op %d %s\n", op, 3762 ceph_cap_op_name(op)); 3763 } 3764 3765 goto done; 3766 3767 flush_cap_releases: 3768 /* 3769 * send any cap release message to try to move things 3770 * along for the mds (who clearly thinks we still have this 3771 * cap). 
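* Sending the release lets the MDS drop its accounting for a cap we no longer hold.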
3772 */ 3773 ceph_send_cap_releases(mdsc, session); 3774 3775 done: 3776 mutex_unlock(&session->s_mutex); 3777 done_unlocked: 3778 iput(inode); 3779 ceph_put_string(pool_ns); 3780 return; 3781 3782 bad: 3783 pr_err("ceph_handle_caps: corrupt message\n"); 3784 ceph_msg_dump(msg); 3785 return; 3786 } 3787 3788 /* 3789 * Delayed work handler to process end of delayed cap release LRU list. 3790 */ 3791 void ceph_check_delayed_caps(struct ceph_mds_client *mdsc) 3792 { 3793 struct ceph_inode_info *ci; 3794 int flags = CHECK_CAPS_NODELAY; 3795 3796 dout("check_delayed_caps\n"); 3797 while (1) { 3798 spin_lock(&mdsc->cap_delay_lock); 3799 if (list_empty(&mdsc->cap_delay_list)) 3800 break; 3801 ci = list_first_entry(&mdsc->cap_delay_list, 3802 struct ceph_inode_info, 3803 i_cap_delay_list); 3804 if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 && 3805 time_before(jiffies, ci->i_hold_caps_max)) 3806 break; 3807 list_del_init(&ci->i_cap_delay_list); 3808 spin_unlock(&mdsc->cap_delay_lock); 3809 dout("check_delayed_caps on %p\n", &ci->vfs_inode); 3810 ceph_check_caps(ci, flags, NULL); 3811 } 3812 spin_unlock(&mdsc->cap_delay_lock); 3813 } 3814 3815 /* 3816 * Flush all dirty caps to the mds 3817 */ 3818 void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc) 3819 { 3820 struct ceph_inode_info *ci; 3821 struct inode *inode; 3822 3823 dout("flush_dirty_caps\n"); 3824 spin_lock(&mdsc->cap_dirty_lock); 3825 while (!list_empty(&mdsc->cap_dirty)) { 3826 ci = list_first_entry(&mdsc->cap_dirty, struct ceph_inode_info, 3827 i_dirty_item); 3828 inode = &ci->vfs_inode; 3829 ihold(inode); 3830 dout("flush_dirty_caps %p\n", inode); 3831 spin_unlock(&mdsc->cap_dirty_lock); 3832 ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH, NULL); 3833 iput(inode); 3834 spin_lock(&mdsc->cap_dirty_lock); 3835 } 3836 spin_unlock(&mdsc->cap_dirty_lock); 3837 dout("flush_dirty_caps done\n"); 3838 } 3839 3840 void __ceph_get_fmode(struct ceph_inode_info *ci, int fmode) 3841 { 3842 int i; 3843 int bits = (fmode << 1) | 1; 3844 for (i = 0; i < CEPH_FILE_MODE_BITS; i++) { 3845 if (bits & (1 << i)) 3846 ci->i_nr_by_mode[i]++; 3847 } 3848 } 3849 3850 /* 3851 * Drop open file reference. If we were the last open file, 3852 * we may need to release capabilities to the MDS (or schedule 3853 * their delayed release). 3854 */ 3855 void ceph_put_fmode(struct ceph_inode_info *ci, int fmode) 3856 { 3857 int i, last = 0; 3858 int bits = (fmode << 1) | 1; 3859 spin_lock(&ci->i_ceph_lock); 3860 for (i = 0; i < CEPH_FILE_MODE_BITS; i++) { 3861 if (bits & (1 << i)) { 3862 BUG_ON(ci->i_nr_by_mode[i] == 0); 3863 if (--ci->i_nr_by_mode[i] == 0) 3864 last++; 3865 } 3866 } 3867 dout("put_fmode %p fmode %d {%d,%d,%d,%d}\n", 3868 &ci->vfs_inode, fmode, 3869 ci->i_nr_by_mode[0], ci->i_nr_by_mode[1], 3870 ci->i_nr_by_mode[2], ci->i_nr_by_mode[3]); 3871 spin_unlock(&ci->i_ceph_lock); 3872 3873 if (last && ci->i_vino.snap == CEPH_NOSNAP) 3874 ceph_check_caps(ci, 0, NULL); 3875 } 3876 3877 /* 3878 * Helpers for embedding cap and dentry lease releases into mds 3879 * requests. 3880 * 3881 * @force is used by dentry_release (below) to force inclusion of a 3882 * record for the directory inode, even when there aren't any caps to 3883 * drop. 
*/ 3885 int ceph_encode_inode_release(void **p, struct inode *inode, 3886 int mds, int drop, int unless, int force) 3887 { 3888 struct ceph_inode_info *ci = ceph_inode(inode); 3889 struct ceph_cap *cap; 3890 struct ceph_mds_request_release *rel = *p; 3891 int used, dirty; 3892 int ret = 0; 3893 3894 spin_lock(&ci->i_ceph_lock); 3895 used = __ceph_caps_used(ci); 3896 dirty = __ceph_caps_dirty(ci); 3897 3898 dout("encode_inode_release %p mds%d used|dirty %s drop %s unless %s\n", 3899 inode, mds, ceph_cap_string(used|dirty), ceph_cap_string(drop), 3900 ceph_cap_string(unless)); 3901 3902 /* only drop unused, clean caps */ 3903 drop &= ~(used | dirty); 3904 3905 cap = __get_cap_for_mds(ci, mds); 3906 if (cap && __cap_is_valid(cap)) { 3907 if (force || 3908 ((cap->issued & drop) && 3909 (cap->issued & unless) == 0)) { 3910 if ((cap->issued & drop) && 3911 (cap->issued & unless) == 0) { 3912 int wanted = __ceph_caps_wanted(ci); 3913 if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0) 3914 wanted |= cap->mds_wanted; 3915 dout("encode_inode_release %p cap %p " 3916 "%s -> %s, wanted %s -> %s\n", inode, cap, 3917 ceph_cap_string(cap->issued), 3918 ceph_cap_string(cap->issued & ~drop), 3919 ceph_cap_string(cap->mds_wanted), 3920 ceph_cap_string(wanted)); 3921 3922 cap->issued &= ~drop; 3923 cap->implemented &= ~drop; 3924 cap->mds_wanted = wanted; 3925 } else { 3926 dout("encode_inode_release %p cap %p %s" 3927 " (force)\n", inode, cap, 3928 ceph_cap_string(cap->issued)); 3929 } 3930 3931 rel->ino = cpu_to_le64(ceph_ino(inode)); 3932 rel->cap_id = cpu_to_le64(cap->cap_id); 3933 rel->seq = cpu_to_le32(cap->seq); 3934 rel->issue_seq = cpu_to_le32(cap->issue_seq); 3935 rel->mseq = cpu_to_le32(cap->mseq); 3936 rel->caps = cpu_to_le32(cap->implemented); 3937 rel->wanted = cpu_to_le32(cap->mds_wanted); 3938 rel->dname_len = 0; 3939 rel->dname_seq = 0; 3940 *p += sizeof(*rel); 3941 ret = 1; 3942 } else { 3943 dout("encode_inode_release %p cap %p %s\n", 3944 inode, cap, ceph_cap_string(cap->issued)); 3945 } 3946 } 3947 spin_unlock(&ci->i_ceph_lock); 3948 return ret; 3949 } 3950 3951 int ceph_encode_dentry_release(void **p, struct dentry *dentry, 3952 struct inode *dir, 3953 int mds, int drop, int unless) 3954 { 3955 struct dentry *parent = NULL; 3956 struct ceph_mds_request_release *rel = *p; 3957 struct ceph_dentry_info *di = ceph_dentry(dentry); 3958 int force = 0; 3959 int ret; 3960 3961 /* 3962 * force a record for the directory caps if we have a dentry lease. 3963 * this is racy (can't take i_ceph_lock and d_lock together), but it 3964 * doesn't have to be perfect; the mds will revoke anything we don't 3965 * release. 3966 */ 3967 spin_lock(&dentry->d_lock); 3968 if (di->lease_session && di->lease_session->s_mds == mds) 3969 force = 1; 3970 if (!dir) { 3971 parent = dget(dentry->d_parent); 3972 dir = d_inode(parent); 3973 } 3974 spin_unlock(&dentry->d_lock); 3975 3976 ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force); 3977 dput(parent); 3978 3979 spin_lock(&dentry->d_lock); 3980 if (ret && di->lease_session && di->lease_session->s_mds == mds) { 3981 dout("encode_dentry_release %p mds%d seq %d\n", 3982 dentry, mds, (int)di->lease_seq); 3983 rel->dname_len = cpu_to_le32(dentry->d_name.len); 3984 memcpy(*p, dentry->d_name.name, dentry->d_name.len); 3985 *p += dentry->d_name.len; 3986 rel->dname_seq = cpu_to_le32(di->lease_seq); 3987 __ceph_mdsc_drop_dentry_lease(dentry); 3988 } 3989 spin_unlock(&dentry->d_lock); 3990 return ret; 3991 } 3992
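/*
 * Illustrative sketch (not part of this file; the surrounding names
 * below are made up): the expected pairing of the cap-reference
 * helpers above, roughly as the read path in file.c uses them.  A
 * reader blocks until FILE_RD is issued, opportunistically asks for
 * FILE_CACHE, and drops its references when done:
 *
 *	struct page *pinned_page = NULL;
 *	int got = 0, ret;
 *
 *	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, CEPH_CAP_FILE_CACHE,
 *			    -1, &got, &pinned_page);
 *	if (ret < 0)
 *		return ret;
 *	... do the read, going through the page cache only if
 *	    (got & CEPH_CAP_FILE_CACHE) was granted ...
 *	if (pinned_page)
 *		put_page(pinned_page);
 *	ceph_put_cap_refs(ci, got);
 *
 * ceph_put_cap_refs() may itself send a cap message back to the MDS if
 * the last reference was dropped.
 */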