// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/writeback.h>
#include <linux/iversion.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include <linux/ceph/decode.h>
#include <linux/ceph/messenger.h>

/*
 * Capability management
 *
 * The Ceph metadata servers control client access to inode metadata
 * and file data by issuing capabilities, granting clients permission
 * to read and/or write both inode fields and file data to OSDs
 * (storage nodes).  Each capability consists of a set of bits
 * indicating which operations are allowed.
 *
 * If the client holds a *_SHARED cap, the client has a coherent value
 * that can be safely read from the cached inode.
 *
 * In the case of *_EXCL (exclusive) or FILE_WR capabilities, the
 * client is allowed to change inode attributes (e.g., file size,
 * mtime), note its dirty state in the ceph_cap, and asynchronously
 * flush that metadata change to the MDS.
 *
 * In the event of a conflicting operation (perhaps by another
 * client), the MDS will revoke the conflicting client capabilities.
 *
 * In order for a client to cache an inode, it must hold a capability
 * with at least one MDS server.  When inodes are released, release
 * notifications are batched and periodically sent en masse to the MDS
 * cluster to release server state.
 */

static u64 __get_oldest_flush_tid(struct ceph_mds_client *mdsc);
static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *session,
				 struct ceph_inode_info *ci,
				 u64 oldest_flush_tid);

/*
 * Generate readable cap strings for debugging output.
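 *
 * For example, "pAsxLsXsxFsxcrwb" decodes as PIN plus AUTH, LINK,
 * XATTR and FILE caps, where the lower-case letters after each cap
 * class come from gcap_string() below: s=shared, x=excl, c=cache,
 * r=rd, w=wr, b=buffer, a=wrextend, l=lazyio.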
 */
#define MAX_CAP_STR 20
static char cap_str[MAX_CAP_STR][40];
static DEFINE_SPINLOCK(cap_str_lock);
static int last_cap_str;

static char *gcap_string(char *s, int c)
{
	if (c & CEPH_CAP_GSHARED)
		*s++ = 's';
	if (c & CEPH_CAP_GEXCL)
		*s++ = 'x';
	if (c & CEPH_CAP_GCACHE)
		*s++ = 'c';
	if (c & CEPH_CAP_GRD)
		*s++ = 'r';
	if (c & CEPH_CAP_GWR)
		*s++ = 'w';
	if (c & CEPH_CAP_GBUFFER)
		*s++ = 'b';
	if (c & CEPH_CAP_GWREXTEND)
		*s++ = 'a';
	if (c & CEPH_CAP_GLAZYIO)
		*s++ = 'l';
	return s;
}

const char *ceph_cap_string(int caps)
{
	int i;
	char *s;
	int c;

	spin_lock(&cap_str_lock);
	i = last_cap_str++;
	if (last_cap_str == MAX_CAP_STR)
		last_cap_str = 0;
	spin_unlock(&cap_str_lock);

	s = cap_str[i];

	if (caps & CEPH_CAP_PIN)
		*s++ = 'p';

	c = (caps >> CEPH_CAP_SAUTH) & 3;
	if (c) {
		*s++ = 'A';
		s = gcap_string(s, c);
	}

	c = (caps >> CEPH_CAP_SLINK) & 3;
	if (c) {
		*s++ = 'L';
		s = gcap_string(s, c);
	}

	c = (caps >> CEPH_CAP_SXATTR) & 3;
	if (c) {
		*s++ = 'X';
		s = gcap_string(s, c);
	}

	c = caps >> CEPH_CAP_SFILE;
	if (c) {
		*s++ = 'F';
		s = gcap_string(s, c);
	}

	if (s == cap_str[i])
		*s++ = '-';
	*s = 0;
	return cap_str[i];
}

void ceph_caps_init(struct ceph_mds_client *mdsc)
{
	INIT_LIST_HEAD(&mdsc->caps_list);
	spin_lock_init(&mdsc->caps_list_lock);
}

void ceph_caps_finalize(struct ceph_mds_client *mdsc)
{
	struct ceph_cap *cap;

	spin_lock(&mdsc->caps_list_lock);
	while (!list_empty(&mdsc->caps_list)) {
		cap = list_first_entry(&mdsc->caps_list,
				       struct ceph_cap, caps_item);
		list_del(&cap->caps_item);
		kmem_cache_free(ceph_cap_cachep, cap);
	}
	mdsc->caps_total_count = 0;
	mdsc->caps_avail_count = 0;
	mdsc->caps_use_count = 0;
	mdsc->caps_reserve_count = 0;
	mdsc->caps_min_count = 0;
	spin_unlock(&mdsc->caps_list_lock);
}

void ceph_adjust_caps_max_min(struct ceph_mds_client *mdsc,
			      struct ceph_mount_options *fsopt)
{
	spin_lock(&mdsc->caps_list_lock);
	mdsc->caps_min_count = fsopt->max_readdir;
	if (mdsc->caps_min_count < 1024)
		mdsc->caps_min_count = 1024;
	mdsc->caps_use_max = fsopt->caps_max;
	if (mdsc->caps_use_max > 0 &&
	    mdsc->caps_use_max < mdsc->caps_min_count)
		mdsc->caps_use_max = mdsc->caps_min_count;
	spin_unlock(&mdsc->caps_list_lock);
}

static void __ceph_unreserve_caps(struct ceph_mds_client *mdsc, int nr_caps)
{
	struct ceph_cap *cap;
	int i;

	if (nr_caps) {
		BUG_ON(mdsc->caps_reserve_count < nr_caps);
		mdsc->caps_reserve_count -= nr_caps;
		if (mdsc->caps_avail_count >=
		    mdsc->caps_reserve_count + mdsc->caps_min_count) {
			mdsc->caps_total_count -= nr_caps;
			for (i = 0; i < nr_caps; i++) {
				cap = list_first_entry(&mdsc->caps_list,
					struct ceph_cap, caps_item);
				list_del(&cap->caps_item);
				kmem_cache_free(ceph_cap_cachep, cap);
			}
		} else {
			mdsc->caps_avail_count += nr_caps;
		}

		dout("%s: caps %d = %d used + %d resv + %d avail\n",
		     __func__,
		     mdsc->caps_total_count, mdsc->caps_use_count,
		     mdsc->caps_reserve_count, mdsc->caps_avail_count);
		BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
						 mdsc->caps_reserve_count +
						 mdsc->caps_avail_count);
	}
}

/*
 * Called under mdsc->mutex.
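 *
 * The cap pool counters always preserve the invariant
 *
 *   caps_total_count == caps_use_count + caps_reserve_count +
 *                       caps_avail_count
 *
 * which the BUG_ON()s re-check after every adjustment.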
 */
int ceph_reserve_caps(struct ceph_mds_client *mdsc,
		      struct ceph_cap_reservation *ctx, int need)
{
	int i, j;
	struct ceph_cap *cap;
	int have;
	int alloc = 0;
	int max_caps;
	int err = 0;
	bool trimmed = false;
	struct ceph_mds_session *s;
	LIST_HEAD(newcaps);

	dout("reserve caps ctx=%p need=%d\n", ctx, need);

	/* first reserve any caps that are already allocated */
	spin_lock(&mdsc->caps_list_lock);
	if (mdsc->caps_avail_count >= need)
		have = need;
	else
		have = mdsc->caps_avail_count;
	mdsc->caps_avail_count -= have;
	mdsc->caps_reserve_count += have;
	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
					 mdsc->caps_reserve_count +
					 mdsc->caps_avail_count);
	spin_unlock(&mdsc->caps_list_lock);

	for (i = have; i < need; ) {
		cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
		if (cap) {
			list_add(&cap->caps_item, &newcaps);
			alloc++;
			i++;
			continue;
		}

		if (!trimmed) {
			for (j = 0; j < mdsc->max_sessions; j++) {
				s = __ceph_lookup_mds_session(mdsc, j);
				if (!s)
					continue;
				mutex_unlock(&mdsc->mutex);

				mutex_lock(&s->s_mutex);
				max_caps = s->s_nr_caps - (need - i);
				ceph_trim_caps(mdsc, s, max_caps);
				mutex_unlock(&s->s_mutex);

				ceph_put_mds_session(s);
				mutex_lock(&mdsc->mutex);
			}
			trimmed = true;

			spin_lock(&mdsc->caps_list_lock);
			if (mdsc->caps_avail_count) {
				int more_have;
				if (mdsc->caps_avail_count >= need - i)
					more_have = need - i;
				else
					more_have = mdsc->caps_avail_count;

				i += more_have;
				have += more_have;
				mdsc->caps_avail_count -= more_have;
				mdsc->caps_reserve_count += more_have;

			}
			spin_unlock(&mdsc->caps_list_lock);

			continue;
		}

		pr_warn("reserve caps ctx=%p ENOMEM need=%d got=%d\n",
			ctx, need, have + alloc);
		err = -ENOMEM;
		break;
	}

	if (!err) {
		BUG_ON(have + alloc != need);
		ctx->count = need;
		ctx->used = 0;
	}

	spin_lock(&mdsc->caps_list_lock);
	mdsc->caps_total_count += alloc;
	mdsc->caps_reserve_count += alloc;
	list_splice(&newcaps, &mdsc->caps_list);

	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
					 mdsc->caps_reserve_count +
					 mdsc->caps_avail_count);

	if (err)
		__ceph_unreserve_caps(mdsc, have + alloc);

	spin_unlock(&mdsc->caps_list_lock);

	dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n",
	     ctx, mdsc->caps_total_count, mdsc->caps_use_count,
	     mdsc->caps_reserve_count, mdsc->caps_avail_count);
	return err;
}

void ceph_unreserve_caps(struct ceph_mds_client *mdsc,
			 struct ceph_cap_reservation *ctx)
{
	bool reclaim = false;
	if (!ctx->count)
		return;

	dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count);
	spin_lock(&mdsc->caps_list_lock);
	__ceph_unreserve_caps(mdsc, ctx->count);
	ctx->count = 0;

	if (mdsc->caps_use_max > 0 &&
	    mdsc->caps_use_count > mdsc->caps_use_max)
		reclaim = true;
	spin_unlock(&mdsc->caps_list_lock);

	if (reclaim)
		ceph_reclaim_caps_nr(mdsc, ctx->used);
}

struct ceph_cap *ceph_get_cap(struct ceph_mds_client *mdsc,
			      struct ceph_cap_reservation *ctx)
{
	struct ceph_cap *cap = NULL;

	/* temporary, until we do something about cap import/export */
	if (!ctx) {
		cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
		if (cap) {
			spin_lock(&mdsc->caps_list_lock);
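			/* Allocated outside any reservation: count it
			 * directly as in-use and part of the total. */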
			mdsc->caps_use_count++;
			mdsc->caps_total_count++;
			spin_unlock(&mdsc->caps_list_lock);
		} else {
			spin_lock(&mdsc->caps_list_lock);
			if (mdsc->caps_avail_count) {
				BUG_ON(list_empty(&mdsc->caps_list));

				mdsc->caps_avail_count--;
				mdsc->caps_use_count++;
				cap = list_first_entry(&mdsc->caps_list,
						struct ceph_cap, caps_item);
				list_del(&cap->caps_item);

				BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
				       mdsc->caps_reserve_count + mdsc->caps_avail_count);
			}
			spin_unlock(&mdsc->caps_list_lock);
		}

		return cap;
	}

	spin_lock(&mdsc->caps_list_lock);
	dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n",
	     ctx, ctx->count, mdsc->caps_total_count, mdsc->caps_use_count,
	     mdsc->caps_reserve_count, mdsc->caps_avail_count);
	BUG_ON(!ctx->count);
	BUG_ON(ctx->count > mdsc->caps_reserve_count);
	BUG_ON(list_empty(&mdsc->caps_list));

	ctx->count--;
	ctx->used++;
	mdsc->caps_reserve_count--;
	mdsc->caps_use_count++;

	cap = list_first_entry(&mdsc->caps_list, struct ceph_cap, caps_item);
	list_del(&cap->caps_item);

	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
	       mdsc->caps_reserve_count + mdsc->caps_avail_count);
	spin_unlock(&mdsc->caps_list_lock);
	return cap;
}

void ceph_put_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap)
{
	spin_lock(&mdsc->caps_list_lock);
	dout("put_cap %p %d = %d used + %d resv + %d avail\n",
	     cap, mdsc->caps_total_count, mdsc->caps_use_count,
	     mdsc->caps_reserve_count, mdsc->caps_avail_count);
	mdsc->caps_use_count--;
	/*
	 * Keep some preallocated caps around (caps_min_count), to
	 * avoid lots of free/alloc churn.
	 */
	if (mdsc->caps_avail_count >= mdsc->caps_reserve_count +
				      mdsc->caps_min_count) {
		mdsc->caps_total_count--;
		kmem_cache_free(ceph_cap_cachep, cap);
	} else {
		mdsc->caps_avail_count++;
		list_add(&cap->caps_item, &mdsc->caps_list);
	}

	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
	       mdsc->caps_reserve_count + mdsc->caps_avail_count);
	spin_unlock(&mdsc->caps_list_lock);
}

void ceph_reservation_status(struct ceph_fs_client *fsc,
			     int *total, int *avail, int *used, int *reserved,
			     int *min)
{
	struct ceph_mds_client *mdsc = fsc->mdsc;

	spin_lock(&mdsc->caps_list_lock);

	if (total)
		*total = mdsc->caps_total_count;
	if (avail)
		*avail = mdsc->caps_avail_count;
	if (used)
		*used = mdsc->caps_use_count;
	if (reserved)
		*reserved = mdsc->caps_reserve_count;
	if (min)
		*min = mdsc->caps_min_count;

	spin_unlock(&mdsc->caps_list_lock);
}

/*
 * Find ceph_cap for given mds, if any.
 *
 * Called with i_ceph_lock held.
 */
static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
{
	struct ceph_cap *cap;
	struct rb_node *n = ci->i_caps.rb_node;

	while (n) {
		cap = rb_entry(n, struct ceph_cap, ci_node);
		if (mds < cap->mds)
			n = n->rb_left;
		else if (mds > cap->mds)
			n = n->rb_right;
		else
			return cap;
	}
	return NULL;
}

struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, int mds)
{
	struct ceph_cap *cap;

	spin_lock(&ci->i_ceph_lock);
	cap = __get_cap_for_mds(ci, mds);
	spin_unlock(&ci->i_ceph_lock);
	return cap;
}

/*
 * Called under i_ceph_lock.
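 *
 * Each inode keeps its caps in an rb-tree (ci->i_caps) keyed by MDS
 * rank, so there is at most one ceph_cap per MDS; trying to insert a
 * duplicate mds value is a bug.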
 */
static void __insert_cap_node(struct ceph_inode_info *ci,
			      struct ceph_cap *new)
{
	struct rb_node **p = &ci->i_caps.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_cap *cap = NULL;

	while (*p) {
		parent = *p;
		cap = rb_entry(parent, struct ceph_cap, ci_node);
		if (new->mds < cap->mds)
			p = &(*p)->rb_left;
		else if (new->mds > cap->mds)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->ci_node, parent, p);
	rb_insert_color(&new->ci_node, &ci->i_caps);
}

/*
 * (re)set cap hold timeouts, which control the delayed release
 * of unused caps back to the MDS.  Should be called on cap use.
 */
static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
			       struct ceph_inode_info *ci)
{
	struct ceph_mount_options *opt = mdsc->fsc->mount_options;
	ci->i_hold_caps_max = round_jiffies(jiffies +
					    opt->caps_wanted_delay_max * HZ);
	dout("__cap_set_timeouts %p %lu\n", &ci->netfs.inode,
	     ci->i_hold_caps_max - jiffies);
}

/*
 * (Re)queue cap at the end of the delayed cap release list.
 *
 * If I_FLUSH is set, leave the inode at the front of the list.
 *
 * Caller holds i_ceph_lock
 *    -> we take mdsc->cap_delay_lock
 */
static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
				struct ceph_inode_info *ci)
{
	dout("__cap_delay_requeue %p flags 0x%lx at %lu\n", &ci->netfs.inode,
	     ci->i_ceph_flags, ci->i_hold_caps_max);
	if (!mdsc->stopping) {
		spin_lock(&mdsc->cap_delay_lock);
		if (!list_empty(&ci->i_cap_delay_list)) {
			if (ci->i_ceph_flags & CEPH_I_FLUSH)
				goto no_change;
			list_del_init(&ci->i_cap_delay_list);
		}
		__cap_set_timeouts(mdsc, ci);
		list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
no_change:
		spin_unlock(&mdsc->cap_delay_lock);
	}
}

/*
 * Queue an inode for immediate writeback.  Mark inode with I_FLUSH,
 * indicating we should send a cap message to flush dirty metadata
 * asap, and move to the front of the delayed cap list.
 */
static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
				      struct ceph_inode_info *ci)
{
	dout("__cap_delay_requeue_front %p\n", &ci->netfs.inode);
	spin_lock(&mdsc->cap_delay_lock);
	ci->i_ceph_flags |= CEPH_I_FLUSH;
	if (!list_empty(&ci->i_cap_delay_list))
		list_del_init(&ci->i_cap_delay_list);
	list_add(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
	spin_unlock(&mdsc->cap_delay_lock);
}

/*
 * Cancel delayed work on cap.
 *
 * Caller must hold i_ceph_lock.
 */
static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
			       struct ceph_inode_info *ci)
{
	dout("__cap_delay_cancel %p\n", &ci->netfs.inode);
	if (list_empty(&ci->i_cap_delay_list))
		return;
	spin_lock(&mdsc->cap_delay_lock);
	list_del_init(&ci->i_cap_delay_list);
	spin_unlock(&mdsc->cap_delay_lock);
}

/* Common issue checks for add_cap, handle_cap_grant. */
static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
			      unsigned issued)
{
	unsigned had = __ceph_caps_issued(ci, NULL);

	lockdep_assert_held(&ci->i_ceph_lock);

	/*
	 * Each time we receive FILE_CACHE anew, we increment
	 * i_rdcache_gen.
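	 * Bumping the generation invalidates any page cache
	 * invalidation that raced with the new issue (see
	 * try_nonblocking_invalidate(), which compares generations).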
	 */
	if (S_ISREG(ci->netfs.inode.i_mode) &&
	    (issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
	    (had & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0) {
		ci->i_rdcache_gen++;
	}

	/*
	 * If FILE_SHARED is newly issued, mark dir not complete. We don't
	 * know what happened to this directory while we didn't have the cap.
	 * If FILE_SHARED is being revoked, also mark dir not complete. It
	 * stops ongoing cached readdir.
	 */
	if ((issued & CEPH_CAP_FILE_SHARED) != (had & CEPH_CAP_FILE_SHARED)) {
		if (issued & CEPH_CAP_FILE_SHARED)
			atomic_inc(&ci->i_shared_gen);
		if (S_ISDIR(ci->netfs.inode.i_mode)) {
			dout(" marking %p NOT complete\n", &ci->netfs.inode);
			__ceph_dir_clear_complete(ci);
		}
	}

	/* Wipe saved layout if we're losing DIR_CREATE caps */
	if (S_ISDIR(ci->netfs.inode.i_mode) && (had & CEPH_CAP_DIR_CREATE) &&
	    !(issued & CEPH_CAP_DIR_CREATE)) {
		ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns));
		memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout));
	}
}

/**
 * change_auth_cap_ses - move inode to appropriate lists when auth caps change
 * @ci: inode to be moved
 * @session: new auth caps session
 */
void change_auth_cap_ses(struct ceph_inode_info *ci,
			 struct ceph_mds_session *session)
{
	lockdep_assert_held(&ci->i_ceph_lock);

	if (list_empty(&ci->i_dirty_item) && list_empty(&ci->i_flushing_item))
		return;

	spin_lock(&session->s_mdsc->cap_dirty_lock);
	if (!list_empty(&ci->i_dirty_item))
		list_move(&ci->i_dirty_item, &session->s_cap_dirty);
	if (!list_empty(&ci->i_flushing_item))
		list_move_tail(&ci->i_flushing_item, &session->s_cap_flushing);
	spin_unlock(&session->s_mdsc->cap_dirty_lock);
}

/*
 * Add a capability under the given MDS session.
 *
 * Caller should hold session snap_rwsem (read) and ci->i_ceph_lock
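 *
 * @new_cap supplies a preallocated cap; it is consumed (and set to
 * NULL) when no cap for this MDS exists on the inode yet.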
 */
void ceph_add_cap(struct inode *inode,
		  struct ceph_mds_session *session, u64 cap_id,
		  unsigned issued, unsigned wanted,
		  unsigned seq, unsigned mseq, u64 realmino, int flags,
		  struct ceph_cap **new_cap)
{
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap *cap;
	int mds = session->s_mds;
	int actual_wanted;
	u32 gen;

	lockdep_assert_held(&ci->i_ceph_lock);

	dout("add_cap %p mds%d cap %llx %s seq %d\n", inode,
	     session->s_mds, cap_id, ceph_cap_string(issued), seq);

	gen = atomic_read(&session->s_cap_gen);

	cap = __get_cap_for_mds(ci, mds);
	if (!cap) {
		cap = *new_cap;
		*new_cap = NULL;

		cap->issued = 0;
		cap->implemented = 0;
		cap->mds = mds;
		cap->mds_wanted = 0;
		cap->mseq = 0;

		cap->ci = ci;
		__insert_cap_node(ci, cap);

		/* add to session cap list */
		cap->session = session;
		spin_lock(&session->s_cap_lock);
		list_add_tail(&cap->session_caps, &session->s_caps);
		session->s_nr_caps++;
		atomic64_inc(&mdsc->metric.total_caps);
		spin_unlock(&session->s_cap_lock);
	} else {
		spin_lock(&session->s_cap_lock);
		list_move_tail(&cap->session_caps, &session->s_caps);
		spin_unlock(&session->s_cap_lock);

		if (cap->cap_gen < gen)
			cap->issued = cap->implemented = CEPH_CAP_PIN;

		/*
		 * auth mds of the inode changed. we received the cap export
		 * message, but still haven't received the cap import message.
		 * handle_cap_export() updated the new auth MDS' cap.
		 *
		 * "ceph_seq_cmp(seq, cap->seq) <= 0" means we are processing
		 * a message that was sent before the cap import message. So
		 * don't remove caps.
		 */
		if (ceph_seq_cmp(seq, cap->seq) <= 0) {
			WARN_ON(cap != ci->i_auth_cap);
			WARN_ON(cap->cap_id != cap_id);
			seq = cap->seq;
			mseq = cap->mseq;
			issued |= cap->issued;
			flags |= CEPH_CAP_FLAG_AUTH;
		}
	}

	if (!ci->i_snap_realm ||
	    ((flags & CEPH_CAP_FLAG_AUTH) &&
	     realmino != (u64)-1 && ci->i_snap_realm->ino != realmino)) {
		/*
		 * add this inode to the appropriate snap realm
		 */
		struct ceph_snap_realm *realm = ceph_lookup_snap_realm(mdsc,
								       realmino);
		if (realm)
			ceph_change_snap_realm(inode, realm);
		else
			WARN(1, "%s: couldn't find snap realm 0x%llx (ino 0x%llx oldrealm 0x%llx)\n",
			     __func__, realmino, ci->i_vino.ino,
			     ci->i_snap_realm ? ci->i_snap_realm->ino : 0);
	}

	__check_cap_issue(ci, cap, issued);

	/*
	 * If we are issued caps we don't want, or the mds' wanted
	 * value appears to be off, queue a check so we'll release
	 * later and/or update the mds wanted value.
	 */
	actual_wanted = __ceph_caps_wanted(ci);
	if ((wanted & ~actual_wanted) ||
	    (issued & ~actual_wanted & CEPH_CAP_ANY_WR)) {
		dout(" issued %s, mds wanted %s, actual %s, queueing\n",
		     ceph_cap_string(issued), ceph_cap_string(wanted),
		     ceph_cap_string(actual_wanted));
		__cap_delay_requeue(mdsc, ci);
	}

	if (flags & CEPH_CAP_FLAG_AUTH) {
		if (!ci->i_auth_cap ||
		    ceph_seq_cmp(ci->i_auth_cap->mseq, mseq) < 0) {
			if (ci->i_auth_cap &&
			    ci->i_auth_cap->session != cap->session)
				change_auth_cap_ses(ci, cap->session);
			ci->i_auth_cap = cap;
			cap->mds_wanted = wanted;
		}
	} else {
		WARN_ON(ci->i_auth_cap == cap);
	}

	dout("add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n",
	     inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
	     ceph_cap_string(issued|cap->issued), seq, mds);
	cap->cap_id = cap_id;
	cap->issued = issued;
	cap->implemented |= issued;
	if (ceph_seq_cmp(mseq, cap->mseq) > 0)
		cap->mds_wanted = wanted;
	else
		cap->mds_wanted |= wanted;
	cap->seq = seq;
	cap->issue_seq = seq;
	cap->mseq = mseq;
	cap->cap_gen = gen;
	wake_up_all(&ci->i_cap_wq);
}

/*
 * Return true if cap has not timed out and belongs to the current
 * generation of the MDS session (i.e. has not gone 'stale' due to
 * us losing touch with the mds).
 */
static int __cap_is_valid(struct ceph_cap *cap)
{
	unsigned long ttl;
	u32 gen;

	gen = atomic_read(&cap->session->s_cap_gen);
	ttl = cap->session->s_cap_ttl;

	if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) {
		dout("__cap_is_valid %p cap %p issued %s "
		     "but STALE (gen %u vs %u)\n", &cap->ci->netfs.inode,
		     cap, ceph_cap_string(cap->issued), cap->cap_gen, gen);
		return 0;
	}

	return 1;
}

/*
 * Return set of valid cap bits issued to us.  Note that caps time
 * out, and may be invalidated in bulk if the client session times out
 * and session->s_cap_gen is bumped.
 */
int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
{
	int have = ci->i_snap_caps;
	struct ceph_cap *cap;
	struct rb_node *p;

	if (implemented)
		*implemented = 0;
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (!__cap_is_valid(cap))
			continue;
		dout("__ceph_caps_issued %p cap %p issued %s\n",
		     &ci->netfs.inode, cap, ceph_cap_string(cap->issued));
		have |= cap->issued;
		if (implemented)
			*implemented |= cap->implemented;
	}
	/*
	 * exclude caps issued by non-auth MDS, but which are being
	 * revoked by the auth MDS. The non-auth MDS should be
	 * revoking/exporting these caps, but the message is delayed.
	 */
	if (ci->i_auth_cap) {
		cap = ci->i_auth_cap;
		have &= ~cap->implemented | cap->issued;
	}
	return have;
}

/*
 * Get cap bits issued by caps other than @ocap
 */
int __ceph_caps_issued_other(struct ceph_inode_info *ci, struct ceph_cap *ocap)
{
	int have = ci->i_snap_caps;
	struct ceph_cap *cap;
	struct rb_node *p;

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (cap == ocap)
			continue;
		if (!__cap_is_valid(cap))
			continue;
		have |= cap->issued;
	}
	return have;
}

/*
 * Move a cap to the end of the LRU (oldest caps at list head, newest
 * at list tail).
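 *
 * The move is skipped while somebody is iterating over the session's
 * cap list (s_cap_iterator), since reordering entries under the
 * iterator could make it skip or revisit caps.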
 */
static void __touch_cap(struct ceph_cap *cap)
{
	struct ceph_mds_session *s = cap->session;

	spin_lock(&s->s_cap_lock);
	if (!s->s_cap_iterator) {
		dout("__touch_cap %p cap %p mds%d\n", &cap->ci->netfs.inode, cap,
		     s->s_mds);
		list_move_tail(&cap->session_caps, &s->s_caps);
	} else {
		dout("__touch_cap %p cap %p mds%d NOP, iterating over caps\n",
		     &cap->ci->netfs.inode, cap, s->s_mds);
	}
	spin_unlock(&s->s_cap_lock);
}

/*
 * Check if we hold the given mask.  If so, move the cap(s) to the
 * tail (most recently used end) of their respective session LRU
 * lists.  (This is the preferred way for callers to check for caps
 * they want.)
 */
int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
{
	struct ceph_cap *cap;
	struct rb_node *p;
	int have = ci->i_snap_caps;

	if ((have & mask) == mask) {
		dout("__ceph_caps_issued_mask ino 0x%llx snap issued %s"
		     " (mask %s)\n", ceph_ino(&ci->netfs.inode),
		     ceph_cap_string(have),
		     ceph_cap_string(mask));
		return 1;
	}

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (!__cap_is_valid(cap))
			continue;
		if ((cap->issued & mask) == mask) {
			dout("__ceph_caps_issued_mask ino 0x%llx cap %p issued %s"
			     " (mask %s)\n", ceph_ino(&ci->netfs.inode), cap,
			     ceph_cap_string(cap->issued),
			     ceph_cap_string(mask));
			if (touch)
				__touch_cap(cap);
			return 1;
		}

		/* does a combination of caps satisfy mask? */
		have |= cap->issued;
		if ((have & mask) == mask) {
			dout("__ceph_caps_issued_mask ino 0x%llx combo issued %s"
			     " (mask %s)\n", ceph_ino(&ci->netfs.inode),
			     ceph_cap_string(cap->issued),
			     ceph_cap_string(mask));
			if (touch) {
				struct rb_node *q;

				/* touch this + preceding caps */
				__touch_cap(cap);
				for (q = rb_first(&ci->i_caps); q != p;
				     q = rb_next(q)) {
					cap = rb_entry(q, struct ceph_cap,
						       ci_node);
					if (!__cap_is_valid(cap))
						continue;
					if (cap->issued & mask)
						__touch_cap(cap);
				}
			}
			return 1;
		}
	}

	return 0;
}

int __ceph_caps_issued_mask_metric(struct ceph_inode_info *ci, int mask,
				   int touch)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);
	int r;

	r = __ceph_caps_issued_mask(ci, mask, touch);
	if (r)
		ceph_update_cap_hit(&fsc->mdsc->metric);
	else
		ceph_update_cap_mis(&fsc->mdsc->metric);
	return r;
}

/*
 * Return true if mask caps are currently being revoked by an MDS.
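 *
 * A cap bit is being revoked when we still implement it (hold it)
 * but it is no longer issued, i.e. (implemented & ~issued & mask)
 * is non-zero.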
 */
int __ceph_caps_revoking_other(struct ceph_inode_info *ci,
			       struct ceph_cap *ocap, int mask)
{
	struct ceph_cap *cap;
	struct rb_node *p;

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (cap != ocap &&
		    (cap->implemented & ~cap->issued & mask))
			return 1;
	}
	return 0;
}

int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
{
	struct inode *inode = &ci->netfs.inode;
	int ret;

	spin_lock(&ci->i_ceph_lock);
	ret = __ceph_caps_revoking_other(ci, NULL, mask);
	spin_unlock(&ci->i_ceph_lock);
	dout("ceph_caps_revoking %p %s = %d\n", inode,
	     ceph_cap_string(mask), ret);
	return ret;
}

int __ceph_caps_used(struct ceph_inode_info *ci)
{
	int used = 0;
	if (ci->i_pin_ref)
		used |= CEPH_CAP_PIN;
	if (ci->i_rd_ref)
		used |= CEPH_CAP_FILE_RD;
	if (ci->i_rdcache_ref ||
	    (S_ISREG(ci->netfs.inode.i_mode) &&
	     ci->netfs.inode.i_data.nrpages))
		used |= CEPH_CAP_FILE_CACHE;
	if (ci->i_wr_ref)
		used |= CEPH_CAP_FILE_WR;
	if (ci->i_wb_ref || ci->i_wrbuffer_ref)
		used |= CEPH_CAP_FILE_BUFFER;
	if (ci->i_fx_ref)
		used |= CEPH_CAP_FILE_EXCL;
	return used;
}

#define FMODE_WAIT_BIAS 1000

/*
 * wanted, by virtue of open file modes
 */
int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
{
	const int PIN_SHIFT = ffs(CEPH_FILE_MODE_PIN);
	const int RD_SHIFT = ffs(CEPH_FILE_MODE_RD);
	const int WR_SHIFT = ffs(CEPH_FILE_MODE_WR);
	const int LAZY_SHIFT = ffs(CEPH_FILE_MODE_LAZY);
	struct ceph_mount_options *opt =
		ceph_inode_to_client(&ci->netfs.inode)->mount_options;
	unsigned long used_cutoff = jiffies - opt->caps_wanted_delay_max * HZ;
	unsigned long idle_cutoff = jiffies - opt->caps_wanted_delay_min * HZ;

	if (S_ISDIR(ci->netfs.inode.i_mode)) {
		int want = 0;

		/* use used_cutoff here, to keep dir's wanted caps longer */
		if (ci->i_nr_by_mode[RD_SHIFT] > 0 ||
		    time_after(ci->i_last_rd, used_cutoff))
			want |= CEPH_CAP_ANY_SHARED;

		if (ci->i_nr_by_mode[WR_SHIFT] > 0 ||
		    time_after(ci->i_last_wr, used_cutoff)) {
			want |= CEPH_CAP_ANY_SHARED | CEPH_CAP_FILE_EXCL;
			if (opt->flags & CEPH_MOUNT_OPT_ASYNC_DIROPS)
				want |= CEPH_CAP_ANY_DIR_OPS;
		}

		if (want || ci->i_nr_by_mode[PIN_SHIFT] > 0)
			want |= CEPH_CAP_PIN;

		return want;
	} else {
		int bits = 0;

		if (ci->i_nr_by_mode[RD_SHIFT] > 0) {
			if (ci->i_nr_by_mode[RD_SHIFT] >= FMODE_WAIT_BIAS ||
			    time_after(ci->i_last_rd, used_cutoff))
				bits |= 1 << RD_SHIFT;
		} else if (time_after(ci->i_last_rd, idle_cutoff)) {
			bits |= 1 << RD_SHIFT;
		}

		if (ci->i_nr_by_mode[WR_SHIFT] > 0) {
			if (ci->i_nr_by_mode[WR_SHIFT] >= FMODE_WAIT_BIAS ||
			    time_after(ci->i_last_wr, used_cutoff))
				bits |= 1 << WR_SHIFT;
		} else if (time_after(ci->i_last_wr, idle_cutoff)) {
			bits |= 1 << WR_SHIFT;
		}

		/* check lazyio only when read/write is wanted */
		if ((bits & (CEPH_FILE_MODE_RDWR << 1)) &&
		    ci->i_nr_by_mode[LAZY_SHIFT] > 0)
			bits |= 1 << LAZY_SHIFT;

		return bits ? ceph_caps_for_mode(bits >> 1) : 0;
	}
}

/*
 * wanted, by virtue of open file modes AND cap refs (buffered/cached data)
 */
int __ceph_caps_wanted(struct ceph_inode_info *ci)
{
	int w = __ceph_caps_file_wanted(ci) | __ceph_caps_used(ci);
	if (S_ISDIR(ci->netfs.inode.i_mode)) {
		/* we want EXCL if holding caps of dir ops */
		if (w & CEPH_CAP_ANY_DIR_OPS)
			w |= CEPH_CAP_FILE_EXCL;
	} else {
		/* we want EXCL if dirty data */
		if (w & CEPH_CAP_FILE_BUFFER)
			w |= CEPH_CAP_FILE_EXCL;
	}
	return w;
}

/*
 * Return caps we have registered with the MDS(s) as 'wanted'.
 */
int __ceph_caps_mds_wanted(struct ceph_inode_info *ci, bool check)
{
	struct ceph_cap *cap;
	struct rb_node *p;
	int mds_wanted = 0;

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (check && !__cap_is_valid(cap))
			continue;
		if (cap == ci->i_auth_cap)
			mds_wanted |= cap->mds_wanted;
		else
			mds_wanted |= (cap->mds_wanted & ~CEPH_CAP_ANY_FILE_WR);
	}
	return mds_wanted;
}

int ceph_is_any_caps(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	spin_lock(&ci->i_ceph_lock);
	ret = __ceph_is_any_real_caps(ci);
	spin_unlock(&ci->i_ceph_lock);

	return ret;
}

/*
 * Remove a cap.  Take steps to deal with a racing iterate_session_caps.
 *
 * caller should hold i_ceph_lock.
 * caller will not hold session s_mutex if called from destroy_inode.
 */
void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
{
	struct ceph_mds_session *session = cap->session;
	struct ceph_inode_info *ci = cap->ci;
	struct ceph_mds_client *mdsc;
	int removed = 0;

	/* 'ci' being NULL means the remove has already occurred */
	if (!ci) {
		dout("%s: cap inode is NULL\n", __func__);
		return;
	}

	lockdep_assert_held(&ci->i_ceph_lock);

	dout("__ceph_remove_cap %p from %p\n", cap, &ci->netfs.inode);

	mdsc = ceph_inode_to_client(&ci->netfs.inode)->mdsc;

	/* remove from inode's cap rbtree, and clear auth cap */
	rb_erase(&cap->ci_node, &ci->i_caps);
	if (ci->i_auth_cap == cap)
		ci->i_auth_cap = NULL;

	/* remove from session list */
	spin_lock(&session->s_cap_lock);
	if (session->s_cap_iterator == cap) {
		/* not yet, we are iterating over this very cap */
		dout("__ceph_remove_cap delaying %p removal from session %p\n",
		     cap, cap->session);
	} else {
		list_del_init(&cap->session_caps);
		session->s_nr_caps--;
		atomic64_dec(&mdsc->metric.total_caps);
		cap->session = NULL;
		removed = 1;
	}
	/* protect backpointer with s_cap_lock: see iterate_session_caps */
	cap->ci = NULL;

	/*
	 * s_cap_reconnect is protected by s_cap_lock. no one changes
	 * s_cap_gen while session is in the reconnect state.
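	 * During reconnect we therefore only queue a release for caps
	 * from the current generation (see the check below); caps from
	 * an older generation are already stale on the MDS side.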
	 */
	if (queue_release &&
	    (!session->s_cap_reconnect ||
	     cap->cap_gen == atomic_read(&session->s_cap_gen))) {
		cap->queue_release = 1;
		if (removed) {
			__ceph_queue_cap_release(session, cap);
			removed = 0;
		}
	} else {
		cap->queue_release = 0;
	}
	cap->cap_ino = ci->i_vino.ino;

	spin_unlock(&session->s_cap_lock);

	if (removed)
		ceph_put_cap(mdsc, cap);

	if (!__ceph_is_any_real_caps(ci)) {
		/* when reconnect denied, we remove session caps forcibly,
		 * i_wr_ref can be non-zero. If there are ongoing writes,
		 * keep i_snap_realm.
		 */
		if (ci->i_wr_ref == 0 && ci->i_snap_realm)
			ceph_change_snap_realm(&ci->netfs.inode, NULL);

		__cap_delay_cancel(mdsc, ci);
	}
}

void ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
{
	struct ceph_inode_info *ci = cap->ci;
	struct ceph_fs_client *fsc;

	/* 'ci' being NULL means the remove has already occurred */
	if (!ci) {
		dout("%s: cap inode is NULL\n", __func__);
		return;
	}

	lockdep_assert_held(&ci->i_ceph_lock);

	fsc = ceph_inode_to_client(&ci->netfs.inode);
	WARN_ON_ONCE(ci->i_auth_cap == cap &&
		     !list_empty(&ci->i_dirty_item) &&
		     !fsc->blocklisted &&
		     !ceph_inode_is_shutdown(&ci->netfs.inode));

	__ceph_remove_cap(cap, queue_release);
}

struct cap_msg_args {
	struct ceph_mds_session	*session;
	u64			ino, cid, follows;
	u64			flush_tid, oldest_flush_tid, size, max_size;
	u64			xattr_version;
	u64			change_attr;
	struct ceph_buffer	*xattr_buf;
	struct ceph_buffer	*old_xattr_buf;
	struct timespec64	atime, mtime, ctime, btime;
	int			op, caps, wanted, dirty;
	u32			seq, issue_seq, mseq, time_warp_seq;
	u32			flags;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
	bool			inline_data;
	bool			wake;
};

/*
 * cap struct size + flock buffer size + inline version + inline data size +
 * osd_epoch_barrier + oldest_flush_tid
 */
#define CAP_MSG_SIZE (sizeof(struct ceph_mds_caps) + \
		      4 + 8 + 4 + 4 + 8 + 4 + 4 + 4 + 8 + 8 + 4)

/* Marshal up the cap msg to the MDS */
static void encode_cap_msg(struct ceph_msg *msg, struct cap_msg_args *arg)
{
	struct ceph_mds_caps *fc;
	void *p;
	struct ceph_osd_client *osdc = &arg->session->s_mdsc->fsc->client->osdc;

	dout("%s %s %llx %llx caps %s wanted %s dirty %s seq %u/%u tid %llu/%llu mseq %u follows %lld size %llu/%llu xattr_ver %llu xattr_len %d\n",
	     __func__, ceph_cap_op_name(arg->op), arg->cid, arg->ino,
	     ceph_cap_string(arg->caps), ceph_cap_string(arg->wanted),
	     ceph_cap_string(arg->dirty), arg->seq, arg->issue_seq,
	     arg->flush_tid, arg->oldest_flush_tid, arg->mseq, arg->follows,
	     arg->size, arg->max_size, arg->xattr_version,
	     arg->xattr_buf ? (int)arg->xattr_buf->vec.iov_len : 0);

	msg->hdr.version = cpu_to_le16(10);
	msg->hdr.tid = cpu_to_le64(arg->flush_tid);

	fc = msg->front.iov_base;
	memset(fc, 0, sizeof(*fc));

	fc->cap_id = cpu_to_le64(arg->cid);
	fc->op = cpu_to_le32(arg->op);
	fc->seq = cpu_to_le32(arg->seq);
	fc->issue_seq = cpu_to_le32(arg->issue_seq);
	fc->migrate_seq = cpu_to_le32(arg->mseq);
	fc->caps = cpu_to_le32(arg->caps);
	fc->wanted = cpu_to_le32(arg->wanted);
	fc->dirty = cpu_to_le32(arg->dirty);
	fc->ino = cpu_to_le64(arg->ino);
	fc->snap_follows = cpu_to_le64(arg->follows);

	fc->size = cpu_to_le64(arg->size);
	fc->max_size = cpu_to_le64(arg->max_size);
	ceph_encode_timespec64(&fc->mtime, &arg->mtime);
	ceph_encode_timespec64(&fc->atime, &arg->atime);
	ceph_encode_timespec64(&fc->ctime, &arg->ctime);
	fc->time_warp_seq = cpu_to_le32(arg->time_warp_seq);

	fc->uid = cpu_to_le32(from_kuid(&init_user_ns, arg->uid));
	fc->gid = cpu_to_le32(from_kgid(&init_user_ns, arg->gid));
	fc->mode = cpu_to_le32(arg->mode);

	fc->xattr_version = cpu_to_le64(arg->xattr_version);
	if (arg->xattr_buf) {
		msg->middle = ceph_buffer_get(arg->xattr_buf);
		fc->xattr_len = cpu_to_le32(arg->xattr_buf->vec.iov_len);
		msg->hdr.middle_len = cpu_to_le32(arg->xattr_buf->vec.iov_len);
	}

	p = fc + 1;
	/* flock buffer size (version 2) */
	ceph_encode_32(&p, 0);
	/* inline version (version 4) */
	ceph_encode_64(&p, arg->inline_data ? 0 : CEPH_INLINE_NONE);
	/* inline data size */
	ceph_encode_32(&p, 0);
	/*
	 * osd_epoch_barrier (version 5)
	 * The epoch_barrier is protected by osdc->lock, so READ_ONCE here in
	 * case it was recently changed
	 */
	ceph_encode_32(&p, READ_ONCE(osdc->epoch_barrier));
	/* oldest_flush_tid (version 6) */
	ceph_encode_64(&p, arg->oldest_flush_tid);

	/*
	 * caller_uid/caller_gid (version 7)
	 *
	 * Currently, we don't properly track which caller dirtied the caps
	 * last, and force a flush of them when there is a conflict. For now,
	 * just set this to 0:0, to emulate how the MDS has worked up to now.
	 */
	ceph_encode_32(&p, 0);
	ceph_encode_32(&p, 0);

	/* pool namespace (version 8) (mds always ignores this) */
	ceph_encode_32(&p, 0);

	/* btime and change_attr (version 9) */
	ceph_encode_timespec64(p, &arg->btime);
	p += sizeof(struct ceph_timespec);
	ceph_encode_64(&p, arg->change_attr);

	/* Advisory flags (version 10) */
	ceph_encode_32(&p, arg->flags);
}

/*
 * Queue cap releases when an inode is dropped from our cache.
 */
void __ceph_remove_caps(struct ceph_inode_info *ci)
{
	struct rb_node *p;

	/* lock i_ceph_lock, because ceph_d_revalidate(..., LOOKUP_RCU)
	 * may call __ceph_caps_issued_mask() on a freeing inode. */
	spin_lock(&ci->i_ceph_lock);
	p = rb_first(&ci->i_caps);
	while (p) {
		struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
		p = rb_next(p);
		ceph_remove_cap(cap, true);
	}
	spin_unlock(&ci->i_ceph_lock);
}

/*
 * Prepare to send a cap message to an MDS. Update the cap state, and populate
 * the arg struct with the parameters that will need to be sent. This should
 * be done under the i_ceph_lock to guard against changes to cap state.
 *
 * Make note of the max_size reported/requested from the MDS, and of
 * revoked caps that have now been implemented.
 */
static void __prep_cap(struct cap_msg_args *arg, struct ceph_cap *cap,
		       int op, int flags, int used, int want, int retain,
		       int flushing, u64 flush_tid, u64 oldest_flush_tid)
{
	struct ceph_inode_info *ci = cap->ci;
	struct inode *inode = &ci->netfs.inode;
	int held, revoking;

	lockdep_assert_held(&ci->i_ceph_lock);

	held = cap->issued | cap->implemented;
	revoking = cap->implemented & ~cap->issued;
	retain &= ~revoking;

	dout("%s %p cap %p session %p %s -> %s (revoking %s)\n",
	     __func__, inode, cap, cap->session,
	     ceph_cap_string(held), ceph_cap_string(held & retain),
	     ceph_cap_string(revoking));
	BUG_ON((retain & CEPH_CAP_PIN) == 0);

	ci->i_ceph_flags &= ~CEPH_I_FLUSH;

	cap->issued &= retain;  /* drop bits we don't want */
	/*
	 * Wake up any waiters on wanted -> needed transition. This is due to
	 * the weird transition from buffered to sync IO... we need to flush
	 * dirty pages _before_ allowing sync writes to avoid reordering.
	 */
	arg->wake = cap->implemented & ~cap->issued;
	cap->implemented &= cap->issued | used;
	cap->mds_wanted = want;

	arg->session = cap->session;
	arg->ino = ceph_vino(inode).ino;
	arg->cid = cap->cap_id;
	arg->follows = flushing ? ci->i_head_snapc->seq : 0;
	arg->flush_tid = flush_tid;
	arg->oldest_flush_tid = oldest_flush_tid;

	arg->size = i_size_read(inode);
	ci->i_reported_size = arg->size;
	arg->max_size = ci->i_wanted_max_size;
	if (cap == ci->i_auth_cap) {
		if (want & CEPH_CAP_ANY_FILE_WR)
			ci->i_requested_max_size = arg->max_size;
		else
			ci->i_requested_max_size = 0;
	}

	if (flushing & CEPH_CAP_XATTR_EXCL) {
		arg->old_xattr_buf = __ceph_build_xattrs_blob(ci);
		arg->xattr_version = ci->i_xattrs.version;
		arg->xattr_buf = ci->i_xattrs.blob;
	} else {
		arg->xattr_buf = NULL;
		arg->old_xattr_buf = NULL;
	}

	arg->mtime = inode->i_mtime;
	arg->atime = inode->i_atime;
	arg->ctime = inode->i_ctime;
	arg->btime = ci->i_btime;
	arg->change_attr = inode_peek_iversion_raw(inode);

	arg->op = op;
	arg->caps = cap->implemented;
	arg->wanted = want;
	arg->dirty = flushing;

	arg->seq = cap->seq;
	arg->issue_seq = cap->issue_seq;
	arg->mseq = cap->mseq;
	arg->time_warp_seq = ci->i_time_warp_seq;

	arg->uid = inode->i_uid;
	arg->gid = inode->i_gid;
	arg->mode = inode->i_mode;

	arg->inline_data = ci->i_inline_version != CEPH_INLINE_NONE;
	if (!(flags & CEPH_CLIENT_CAPS_PENDING_CAPSNAP) &&
	    !list_empty(&ci->i_cap_snaps)) {
		struct ceph_cap_snap *capsnap;
		list_for_each_entry_reverse(capsnap, &ci->i_cap_snaps, ci_item) {
			if (capsnap->cap_flush.tid)
				break;
			if (capsnap->need_flush) {
				flags |= CEPH_CLIENT_CAPS_PENDING_CAPSNAP;
				break;
			}
		}
	}
	arg->flags = flags;
}

/*
 * Send a cap msg on the given inode.
 *
 * Caller should hold snap_rwsem (read), s_mutex.
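 *
 * If message allocation fails, the inode is simply requeued on the
 * delayed-cap list and the send is retried later; no cap state is
 * lost.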
 */
static void __send_cap(struct cap_msg_args *arg, struct ceph_inode_info *ci)
{
	struct ceph_msg *msg;
	struct inode *inode = &ci->netfs.inode;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, CAP_MSG_SIZE, GFP_NOFS, false);
	if (!msg) {
		pr_err("error allocating cap msg: ino (%llx.%llx) flushing %s tid %llu, requeuing cap.\n",
		       ceph_vinop(inode), ceph_cap_string(arg->dirty),
		       arg->flush_tid);
		spin_lock(&ci->i_ceph_lock);
		__cap_delay_requeue(arg->session->s_mdsc, ci);
		spin_unlock(&ci->i_ceph_lock);
		return;
	}

	encode_cap_msg(msg, arg);
	ceph_con_send(&arg->session->s_con, msg);
	ceph_buffer_put(arg->old_xattr_buf);
	if (arg->wake)
		wake_up_all(&ci->i_cap_wq);
}

static inline int __send_flush_snap(struct inode *inode,
				    struct ceph_mds_session *session,
				    struct ceph_cap_snap *capsnap,
				    u32 mseq, u64 oldest_flush_tid)
{
	struct cap_msg_args arg;
	struct ceph_msg *msg;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, CAP_MSG_SIZE, GFP_NOFS, false);
	if (!msg)
		return -ENOMEM;

	arg.session = session;
	arg.ino = ceph_vino(inode).ino;
	arg.cid = 0;
	arg.follows = capsnap->follows;
	arg.flush_tid = capsnap->cap_flush.tid;
	arg.oldest_flush_tid = oldest_flush_tid;

	arg.size = capsnap->size;
	arg.max_size = 0;
	arg.xattr_version = capsnap->xattr_version;
	arg.xattr_buf = capsnap->xattr_blob;
	arg.old_xattr_buf = NULL;

	arg.atime = capsnap->atime;
	arg.mtime = capsnap->mtime;
	arg.ctime = capsnap->ctime;
	arg.btime = capsnap->btime;
	arg.change_attr = capsnap->change_attr;

	arg.op = CEPH_CAP_OP_FLUSHSNAP;
	arg.caps = capsnap->issued;
	arg.wanted = 0;
	arg.dirty = capsnap->dirty;

	arg.seq = 0;
	arg.issue_seq = 0;
	arg.mseq = mseq;
	arg.time_warp_seq = capsnap->time_warp_seq;

	arg.uid = capsnap->uid;
	arg.gid = capsnap->gid;
	arg.mode = capsnap->mode;

	arg.inline_data = capsnap->inline_data;
	arg.flags = 0;
	arg.wake = false;

	encode_cap_msg(msg, &arg);
	ceph_con_send(&arg.session->s_con, msg);
	return 0;
}

/*
 * When a snapshot is taken, clients accumulate dirty metadata on
 * inodes with capabilities in ceph_cap_snaps to describe the file
 * state at the time the snapshot was taken.  This must be flushed
 * asynchronously back to the MDS once sync writes complete and dirty
 * data is written out.
 *
 * Called under i_ceph_lock.
 */
static void __ceph_flush_snaps(struct ceph_inode_info *ci,
			       struct ceph_mds_session *session)
		__releases(ci->i_ceph_lock)
		__acquires(ci->i_ceph_lock)
{
	struct inode *inode = &ci->netfs.inode;
	struct ceph_mds_client *mdsc = session->s_mdsc;
	struct ceph_cap_snap *capsnap;
	u64 oldest_flush_tid = 0;
	u64 first_tid = 1, last_tid = 0;

	dout("__flush_snaps %p session %p\n", inode, session);

	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		/*
		 * we need to wait for sync writes to complete and for dirty
		 * pages to be written out.
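		 * capsnaps on i_cap_snaps are ordered oldest first, so
		 * stop at the first one that is still being written.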
		 */
		if (capsnap->dirty_pages || capsnap->writing)
			break;

		/* should be removed by ceph_try_drop_cap_snap() */
		BUG_ON(!capsnap->need_flush);

		/* only flush each capsnap once */
		if (capsnap->cap_flush.tid > 0) {
			dout(" already flushed %p, skipping\n", capsnap);
			continue;
		}

		spin_lock(&mdsc->cap_dirty_lock);
		capsnap->cap_flush.tid = ++mdsc->last_cap_flush_tid;
		list_add_tail(&capsnap->cap_flush.g_list,
			      &mdsc->cap_flush_list);
		if (oldest_flush_tid == 0)
			oldest_flush_tid = __get_oldest_flush_tid(mdsc);
		if (list_empty(&ci->i_flushing_item)) {
			list_add_tail(&ci->i_flushing_item,
				      &session->s_cap_flushing);
		}
		spin_unlock(&mdsc->cap_dirty_lock);

		list_add_tail(&capsnap->cap_flush.i_list,
			      &ci->i_cap_flush_list);

		if (first_tid == 1)
			first_tid = capsnap->cap_flush.tid;
		last_tid = capsnap->cap_flush.tid;
	}

	ci->i_ceph_flags &= ~CEPH_I_FLUSH_SNAPS;

	while (first_tid <= last_tid) {
		struct ceph_cap *cap = ci->i_auth_cap;
		struct ceph_cap_flush *cf = NULL, *iter;
		int ret;

		if (!(cap && cap->session == session)) {
			dout("__flush_snaps %p auth cap %p not mds%d, "
			     "stop\n", inode, cap, session->s_mds);
			break;
		}

		ret = -ENOENT;
		list_for_each_entry(iter, &ci->i_cap_flush_list, i_list) {
			if (iter->tid >= first_tid) {
				cf = iter;
				ret = 0;
				break;
			}
		}
		if (ret < 0)
			break;

		first_tid = cf->tid + 1;

		capsnap = container_of(cf, struct ceph_cap_snap, cap_flush);
		refcount_inc(&capsnap->nref);
		spin_unlock(&ci->i_ceph_lock);

		dout("__flush_snaps %p capsnap %p tid %llu %s\n",
		     inode, capsnap, cf->tid, ceph_cap_string(capsnap->dirty));

		ret = __send_flush_snap(inode, session, capsnap, cap->mseq,
					oldest_flush_tid);
		if (ret < 0) {
			pr_err("__flush_snaps: error sending cap flushsnap, "
			       "ino (%llx.%llx) tid %llu follows %llu\n",
			       ceph_vinop(inode), cf->tid, capsnap->follows);
		}

		ceph_put_cap_snap(capsnap);
		spin_lock(&ci->i_ceph_lock);
	}
}

void ceph_flush_snaps(struct ceph_inode_info *ci,
		      struct ceph_mds_session **psession)
{
	struct inode *inode = &ci->netfs.inode;
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	struct ceph_mds_session *session = NULL;
	int mds;

	dout("ceph_flush_snaps %p\n", inode);
	if (psession)
		session = *psession;
retry:
	spin_lock(&ci->i_ceph_lock);
	if (!(ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS)) {
		dout(" no capsnap needs flush, doing nothing\n");
		goto out;
	}
	if (!ci->i_auth_cap) {
		dout(" no auth cap (migrating?), doing nothing\n");
		goto out;
	}

	mds = ci->i_auth_cap->session->s_mds;
	if (session && session->s_mds != mds) {
		dout(" oops, wrong session %p mutex\n", session);
		ceph_put_mds_session(session);
		session = NULL;
	}
	if (!session) {
		spin_unlock(&ci->i_ceph_lock);
		mutex_lock(&mdsc->mutex);
		session = __ceph_lookup_mds_session(mdsc, mds);
		mutex_unlock(&mdsc->mutex);
		goto retry;
	}

	// make sure flushsnap messages are sent in proper order.
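	// i.e. kick any caps that are still being flushed before new
	// flushsnap messages go out, so tids reach the MDS in order.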
	if (ci->i_ceph_flags & CEPH_I_KICK_FLUSH)
		__kick_flushing_caps(mdsc, session, ci, 0);

	__ceph_flush_snaps(ci, session);
out:
	spin_unlock(&ci->i_ceph_lock);

	if (psession)
		*psession = session;
	else
		ceph_put_mds_session(session);
	/* we flushed them all; remove this inode from the queue */
	spin_lock(&mdsc->snap_flush_lock);
	list_del_init(&ci->i_snap_flush_item);
	spin_unlock(&mdsc->snap_flush_lock);
}

/*
 * Mark caps dirty.  If inode is newly dirty, return the dirty flags.
 * Caller is then responsible for calling __mark_inode_dirty with the
 * returned flags value.
 */
int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
			   struct ceph_cap_flush **pcf)
{
	struct ceph_mds_client *mdsc =
		ceph_sb_to_client(ci->netfs.inode.i_sb)->mdsc;
	struct inode *inode = &ci->netfs.inode;
	int was = ci->i_dirty_caps;
	int dirty = 0;

	lockdep_assert_held(&ci->i_ceph_lock);

	if (!ci->i_auth_cap) {
		pr_warn("__mark_dirty_caps %p %llx mask %s, "
			"but no auth cap (session was closed?)\n",
			inode, ceph_ino(inode), ceph_cap_string(mask));
		return 0;
	}

	dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->netfs.inode,
	     ceph_cap_string(mask), ceph_cap_string(was),
	     ceph_cap_string(was | mask));
	ci->i_dirty_caps |= mask;
	if (was == 0) {
		struct ceph_mds_session *session = ci->i_auth_cap->session;

		WARN_ON_ONCE(ci->i_prealloc_cap_flush);
		swap(ci->i_prealloc_cap_flush, *pcf);

		if (!ci->i_head_snapc) {
			WARN_ON_ONCE(!rwsem_is_locked(&mdsc->snap_rwsem));
			ci->i_head_snapc = ceph_get_snap_context(
				ci->i_snap_realm->cached_context);
		}
		dout(" inode %p now dirty snapc %p auth cap %p\n",
		     &ci->netfs.inode, ci->i_head_snapc, ci->i_auth_cap);
		BUG_ON(!list_empty(&ci->i_dirty_item));
		spin_lock(&mdsc->cap_dirty_lock);
		list_add(&ci->i_dirty_item, &session->s_cap_dirty);
		spin_unlock(&mdsc->cap_dirty_lock);
		if (ci->i_flushing_caps == 0) {
			ihold(inode);
			dirty |= I_DIRTY_SYNC;
		}
	} else {
		WARN_ON_ONCE(!ci->i_prealloc_cap_flush);
	}
	BUG_ON(list_empty(&ci->i_dirty_item));
	if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) &&
	    (mask & CEPH_CAP_FILE_BUFFER))
		dirty |= I_DIRTY_DATASYNC;
	__cap_delay_requeue(mdsc, ci);
	return dirty;
}

struct ceph_cap_flush *ceph_alloc_cap_flush(void)
{
	struct ceph_cap_flush *cf;

	cf = kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL);
	if (!cf)
		return NULL;

	cf->is_capsnap = false;
	return cf;
}

void ceph_free_cap_flush(struct ceph_cap_flush *cf)
{
	if (cf)
		kmem_cache_free(ceph_cap_flush_cachep, cf);
}

static u64 __get_oldest_flush_tid(struct ceph_mds_client *mdsc)
{
	if (!list_empty(&mdsc->cap_flush_list)) {
		struct ceph_cap_flush *cf =
			list_first_entry(&mdsc->cap_flush_list,
					 struct ceph_cap_flush, g_list);
		return cf->tid;
	}
	return 0;
}

/*
 * Remove cap_flush from the mdsc's or inode's flushing cap list.
 * Return true if caller needs to wake up flush waiters.
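 *
 * If this entry carries the "wake" flag but is not the oldest entry
 * on the list, hand the flag off to the previous (older) entry, so
 * waiters are only woken once everything up to their tid has been
 * flushed.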
 */
static bool __detach_cap_flush_from_mdsc(struct ceph_mds_client *mdsc,
					 struct ceph_cap_flush *cf)
{
	struct ceph_cap_flush *prev;
	bool wake = cf->wake;

	if (wake && cf->g_list.prev != &mdsc->cap_flush_list) {
		prev = list_prev_entry(cf, g_list);
		prev->wake = true;
		wake = false;
	}
	list_del_init(&cf->g_list);
	return wake;
}

static bool __detach_cap_flush_from_ci(struct ceph_inode_info *ci,
				       struct ceph_cap_flush *cf)
{
	struct ceph_cap_flush *prev;
	bool wake = cf->wake;

	if (wake && cf->i_list.prev != &ci->i_cap_flush_list) {
		prev = list_prev_entry(cf, i_list);
		prev->wake = true;
		wake = false;
	}
	list_del_init(&cf->i_list);
	return wake;
}

/*
 * Add dirty inode to the flushing list.  Assign a seq number so we
 * can wait for caps to flush without starving.
 *
 * Called under i_ceph_lock. Returns the flush tid.
 */
static u64 __mark_caps_flushing(struct inode *inode,
				struct ceph_mds_session *session, bool wake,
				u64 *oldest_flush_tid)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap_flush *cf = NULL;
	int flushing;

	lockdep_assert_held(&ci->i_ceph_lock);
	BUG_ON(ci->i_dirty_caps == 0);
	BUG_ON(list_empty(&ci->i_dirty_item));
	BUG_ON(!ci->i_prealloc_cap_flush);

	flushing = ci->i_dirty_caps;
	dout("__mark_caps_flushing flushing %s, flushing_caps %s -> %s\n",
	     ceph_cap_string(flushing),
	     ceph_cap_string(ci->i_flushing_caps),
	     ceph_cap_string(ci->i_flushing_caps | flushing));
	ci->i_flushing_caps |= flushing;
	ci->i_dirty_caps = 0;
	dout(" inode %p now !dirty\n", inode);

	swap(cf, ci->i_prealloc_cap_flush);
	cf->caps = flushing;
	cf->wake = wake;

	spin_lock(&mdsc->cap_dirty_lock);
	list_del_init(&ci->i_dirty_item);

	cf->tid = ++mdsc->last_cap_flush_tid;
	list_add_tail(&cf->g_list, &mdsc->cap_flush_list);
	*oldest_flush_tid = __get_oldest_flush_tid(mdsc);

	if (list_empty(&ci->i_flushing_item)) {
		list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing);
		mdsc->num_cap_flushing++;
	}
	spin_unlock(&mdsc->cap_dirty_lock);

	list_add_tail(&cf->i_list, &ci->i_cap_flush_list);

	return cf->tid;
}

/*
 * try to invalidate mapping pages without blocking.
 */
static int try_nonblocking_invalidate(struct inode *inode)
	__releases(ci->i_ceph_lock)
	__acquires(ci->i_ceph_lock)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u32 invalidating_gen = ci->i_rdcache_gen;

	spin_unlock(&ci->i_ceph_lock);
	ceph_fscache_invalidate(inode, false);
	invalidate_mapping_pages(&inode->i_data, 0, -1);
	spin_lock(&ci->i_ceph_lock);

	if (inode->i_data.nrpages == 0 &&
	    invalidating_gen == ci->i_rdcache_gen) {
		/* success. */
		dout("try_nonblocking_invalidate %p success\n", inode);
		/* save any racing async invalidate some trouble */
		ci->i_rdcache_revoking = ci->i_rdcache_gen - 1;
		return 0;
	}
	dout("try_nonblocking_invalidate %p failed\n", inode);
	return -1;
}

bool __ceph_should_report_size(struct ceph_inode_info *ci)
{
	loff_t size = i_size_read(&ci->netfs.inode);
	/* mds will adjust max size according to the reported size */
	if (ci->i_flushing_caps & CEPH_CAP_FILE_WR)
		return false;
	if (size >= ci->i_max_size)
		return true;
	/* half of previous max_size increment has been used */
	if (ci->i_max_size > ci->i_reported_size &&
	    (size << 1) >= ci->i_max_size + ci->i_reported_size)
		return true;
	return false;
}

/*
 * Swiss army knife function to examine currently used and wanted
 * versus held caps.  Release, flush, ack revoked caps to mds as
 * appropriate.
 *
 * CHECK_CAPS_AUTHONLY - we should only check the auth cap
 * CHECK_CAPS_FLUSH - we should flush any dirty caps immediately, without
 *    further delay.
 */
void ceph_check_caps(struct ceph_inode_info *ci, int flags)
{
	struct inode *inode = &ci->netfs.inode;
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
	struct ceph_cap *cap;
	u64 flush_tid, oldest_flush_tid;
	int file_wanted, used, cap_used;
	int issued, implemented, want, retain, revoking, flushing = 0;
	int mds = -1;   /* keep track of how far we've gone through i_caps list
			   to avoid an infinite loop on retry */
	struct rb_node *p;
	bool queue_invalidate = false;
	bool tried_invalidate = false;
	bool queue_writeback = false;
	struct ceph_mds_session *session = NULL;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) {
		ci->i_ceph_flags |= CEPH_I_ASYNC_CHECK_CAPS;

		/* Don't send messages until we get async create reply */
		spin_unlock(&ci->i_ceph_lock);
		return;
	}

	if (ci->i_ceph_flags & CEPH_I_FLUSH)
		flags |= CHECK_CAPS_FLUSH;
retry:
	/* Caps wanted by virtue of active open files. */
	file_wanted = __ceph_caps_file_wanted(ci);

	/* Caps which have active references against them */
	used = __ceph_caps_used(ci);

	/*
	 * "issued" represents the current caps that the MDS wants us to have.
	 * "implemented" is the set that we have been granted, and includes the
	 * ones that have not yet been returned to the MDS (the "revoking" set,
	 * usually because they have outstanding references).
	 */
	issued = __ceph_caps_issued(ci, &implemented);
	revoking = implemented & ~issued;

	want = file_wanted;

	/* The ones we currently want to retain (may be adjusted below) */
	retain = file_wanted | used | CEPH_CAP_PIN;
	if (!mdsc->stopping && inode->i_nlink > 0) {
		if (file_wanted) {
			retain |= CEPH_CAP_ANY;       /* be greedy */
		} else if (S_ISDIR(inode->i_mode) &&
			   (issued & CEPH_CAP_FILE_SHARED) &&
			   __ceph_dir_is_complete(ci)) {
			/*
			 * If a directory is complete, we want to keep
			 * the exclusive cap. So that MDS does not end up
			 * revoking the shared cap on every create/unlink
			 * operation.
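			 * On a read-only mount the shared cap alone
			 * is enough to keep the cached contents valid.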
1959 */ 1960 if (IS_RDONLY(inode)) { 1961 want = CEPH_CAP_ANY_SHARED; 1962 } else { 1963 want |= CEPH_CAP_ANY_SHARED | CEPH_CAP_FILE_EXCL; 1964 } 1965 retain |= want; 1966 } else { 1967 1968 retain |= CEPH_CAP_ANY_SHARED; 1969 /* 1970 * keep RD only if we didn't have the file open RW, 1971 * because then the mds would revoke it anyway to 1972 * journal max_size=0. 1973 */ 1974 if (ci->i_max_size == 0) 1975 retain |= CEPH_CAP_ANY_RD; 1976 } 1977 } 1978 1979 dout("check_caps %llx.%llx file_want %s used %s dirty %s flushing %s" 1980 " issued %s revoking %s retain %s %s%s%s\n", ceph_vinop(inode), 1981 ceph_cap_string(file_wanted), 1982 ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps), 1983 ceph_cap_string(ci->i_flushing_caps), 1984 ceph_cap_string(issued), ceph_cap_string(revoking), 1985 ceph_cap_string(retain), 1986 (flags & CHECK_CAPS_AUTHONLY) ? " AUTHONLY" : "", 1987 (flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "", 1988 (flags & CHECK_CAPS_NOINVAL) ? " NOINVAL" : ""); 1989 1990 /* 1991 * If we no longer need to hold onto old our caps, and we may 1992 * have cached pages, but don't want them, then try to invalidate. 1993 * If we fail, it's because pages are locked.... try again later. 1994 */ 1995 if ((!(flags & CHECK_CAPS_NOINVAL) || mdsc->stopping) && 1996 S_ISREG(inode->i_mode) && 1997 !(ci->i_wb_ref || ci->i_wrbuffer_ref) && /* no dirty pages... */ 1998 inode->i_data.nrpages && /* have cached pages */ 1999 (revoking & (CEPH_CAP_FILE_CACHE| 2000 CEPH_CAP_FILE_LAZYIO)) && /* or revoking cache */ 2001 !tried_invalidate) { 2002 dout("check_caps trying to invalidate on %llx.%llx\n", 2003 ceph_vinop(inode)); 2004 if (try_nonblocking_invalidate(inode) < 0) { 2005 dout("check_caps queuing invalidate\n"); 2006 queue_invalidate = true; 2007 ci->i_rdcache_revoking = ci->i_rdcache_gen; 2008 } 2009 tried_invalidate = true; 2010 goto retry; 2011 } 2012 2013 for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) { 2014 int mflags = 0; 2015 struct cap_msg_args arg; 2016 2017 cap = rb_entry(p, struct ceph_cap, ci_node); 2018 2019 /* avoid looping forever */ 2020 if (mds >= cap->mds || 2021 ((flags & CHECK_CAPS_AUTHONLY) && cap != ci->i_auth_cap)) 2022 continue; 2023 2024 /* 2025 * If we have an auth cap, we don't need to consider any 2026 * overlapping caps as used. 2027 */ 2028 cap_used = used; 2029 if (ci->i_auth_cap && cap != ci->i_auth_cap) 2030 cap_used &= ~ci->i_auth_cap->issued; 2031 2032 revoking = cap->implemented & ~cap->issued; 2033 dout(" mds%d cap %p used %s issued %s implemented %s revoking %s\n", 2034 cap->mds, cap, ceph_cap_string(cap_used), 2035 ceph_cap_string(cap->issued), 2036 ceph_cap_string(cap->implemented), 2037 ceph_cap_string(revoking)); 2038 2039 if (cap == ci->i_auth_cap && 2040 (cap->issued & CEPH_CAP_FILE_WR)) { 2041 /* request larger max_size from MDS? */ 2042 if (ci->i_wanted_max_size > ci->i_max_size && 2043 ci->i_wanted_max_size > ci->i_requested_max_size) { 2044 dout("requesting new max_size\n"); 2045 goto ack; 2046 } 2047 2048 /* approaching file_max? */ 2049 if (__ceph_should_report_size(ci)) { 2050 dout("i_size approaching max_size\n"); 2051 goto ack; 2052 } 2053 } 2054 /* flush anything dirty? */ 2055 if (cap == ci->i_auth_cap) { 2056 if ((flags & CHECK_CAPS_FLUSH) && ci->i_dirty_caps) { 2057 dout("flushing dirty caps\n"); 2058 goto ack; 2059 } 2060 if (ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS) { 2061 dout("flushing snap caps\n"); 2062 goto ack; 2063 } 2064 } 2065 2066 /* completed revocation? going down and there are no caps? 
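 * A revocation is complete once none of the still-revoking bits are
 * held by an active reference; only then do we ack so the MDS can
 * grant the contested caps to another client.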
*/
2067 		if (revoking) {
2068 			if ((revoking & cap_used) == 0) {
2069 				dout("completed revocation of %s\n",
2070 				     ceph_cap_string(cap->implemented & ~cap->issued));
2071 				goto ack;
2072 			}
2073 
2074 			/*
2075 			 * If "i_wrbuffer_ref" was increased by mmap or a generic
2076 			 * cache write just before ceph_check_caps() was called,
2077 			 * revoking the Fb capability will fail this time. We must
2078 			 * then wait for the BDI's delayed work to flush the dirty
2079 			 * pages and release "i_wrbuffer_ref", which can take up
2080 			 * to 5 seconds. That means the MDS may need to wait up
2081 			 * to 5 seconds for the Fb revocation to finish.
2082 			 *
2083 			 * Let's queue a writeback for it.
2084 			 */
2085 			if (S_ISREG(inode->i_mode) && ci->i_wrbuffer_ref &&
2086 			    (revoking & CEPH_CAP_FILE_BUFFER))
2087 				queue_writeback = true;
2088 		}
2089 
2090 		/* want more caps from mds? */
2091 		if (want & ~cap->mds_wanted) {
2092 			if (want & ~(cap->mds_wanted | cap->issued))
2093 				goto ack;
2094 			if (!__cap_is_valid(cap))
2095 				goto ack;
2096 		}
2097 
2098 		/* things we might delay */
2099 		if ((cap->issued & ~retain) == 0)
2100 			continue;     /* nope, all good */
2101 
2102 ack:
2103 		ceph_put_mds_session(session);
2104 		session = ceph_get_mds_session(cap->session);
2105 
2106 		/* kick flushing and flush snaps before sending normal
2107 		 * cap message */
2108 		if (cap == ci->i_auth_cap &&
2109 		    (ci->i_ceph_flags &
2110 		     (CEPH_I_KICK_FLUSH | CEPH_I_FLUSH_SNAPS))) {
2111 			if (ci->i_ceph_flags & CEPH_I_KICK_FLUSH)
2112 				__kick_flushing_caps(mdsc, session, ci, 0);
2113 			if (ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS)
2114 				__ceph_flush_snaps(ci, session);
2115 
2116 			goto retry;
2117 		}
2118 
2119 		if (cap == ci->i_auth_cap && ci->i_dirty_caps) {
2120 			flushing = ci->i_dirty_caps;
2121 			flush_tid = __mark_caps_flushing(inode, session, false,
2122 							 &oldest_flush_tid);
2123 			if (flags & CHECK_CAPS_FLUSH &&
2124 			    list_empty(&session->s_cap_dirty))
2125 				mflags |= CEPH_CLIENT_CAPS_SYNC;
2126 		} else {
2127 			flushing = 0;
2128 			flush_tid = 0;
2129 			spin_lock(&mdsc->cap_dirty_lock);
2130 			oldest_flush_tid = __get_oldest_flush_tid(mdsc);
2131 			spin_unlock(&mdsc->cap_dirty_lock);
2132 		}
2133 
2134 		mds = cap->mds;  /* remember mds, so we don't repeat */
2135 
2136 		__prep_cap(&arg, cap, CEPH_CAP_OP_UPDATE, mflags, cap_used,
2137 			   want, retain, flushing, flush_tid, oldest_flush_tid);
2138 
2139 		spin_unlock(&ci->i_ceph_lock);
2140 		__send_cap(&arg, ci);
2141 		spin_lock(&ci->i_ceph_lock);
2142 
2143 		goto retry; /* retake i_ceph_lock and restart our cap scan. */
2144 	}
2145 
2146 	/* periodically re-calculate caps wanted by open files */
2147 	if (__ceph_is_any_real_caps(ci) &&
2148 	    list_empty(&ci->i_cap_delay_list) &&
2149 	    (file_wanted & ~CEPH_CAP_PIN) &&
2150 	    !(used & (CEPH_CAP_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
2151 		__cap_delay_requeue(mdsc, ci);
2152 	}
2153 
2154 	spin_unlock(&ci->i_ceph_lock);
2155 
2156 	ceph_put_mds_session(session);
2157 	if (queue_writeback)
2158 		ceph_queue_writeback(inode);
2159 	if (queue_invalidate)
2160 		ceph_queue_invalidate(inode);
2161 }
2162 
2163 /*
2164  * Try to flush dirty caps back to the auth mds.
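 * Returns the set of caps that were dirty at the time of the call and
 * stores a tid in *ptid that can be handed to caps_are_flushed() to
 * wait for completion. A sketch of the pattern used by the fsync and
 * write_inode paths below (error handling omitted):
 *
 *	dirty = try_flush_caps(inode, &flush_tid);
 *	if (dirty)
 *		err = wait_event_interruptible(ci->i_cap_wq,
 *				caps_are_flushed(inode, flush_tid));
 *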
2165 */ 2166 static int try_flush_caps(struct inode *inode, u64 *ptid) 2167 { 2168 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; 2169 struct ceph_inode_info *ci = ceph_inode(inode); 2170 int flushing = 0; 2171 u64 flush_tid = 0, oldest_flush_tid = 0; 2172 2173 spin_lock(&ci->i_ceph_lock); 2174 retry_locked: 2175 if (ci->i_dirty_caps && ci->i_auth_cap) { 2176 struct ceph_cap *cap = ci->i_auth_cap; 2177 struct cap_msg_args arg; 2178 struct ceph_mds_session *session = cap->session; 2179 2180 if (session->s_state < CEPH_MDS_SESSION_OPEN) { 2181 spin_unlock(&ci->i_ceph_lock); 2182 goto out; 2183 } 2184 2185 if (ci->i_ceph_flags & 2186 (CEPH_I_KICK_FLUSH | CEPH_I_FLUSH_SNAPS)) { 2187 if (ci->i_ceph_flags & CEPH_I_KICK_FLUSH) 2188 __kick_flushing_caps(mdsc, session, ci, 0); 2189 if (ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS) 2190 __ceph_flush_snaps(ci, session); 2191 goto retry_locked; 2192 } 2193 2194 flushing = ci->i_dirty_caps; 2195 flush_tid = __mark_caps_flushing(inode, session, true, 2196 &oldest_flush_tid); 2197 2198 __prep_cap(&arg, cap, CEPH_CAP_OP_FLUSH, CEPH_CLIENT_CAPS_SYNC, 2199 __ceph_caps_used(ci), __ceph_caps_wanted(ci), 2200 (cap->issued | cap->implemented), 2201 flushing, flush_tid, oldest_flush_tid); 2202 spin_unlock(&ci->i_ceph_lock); 2203 2204 __send_cap(&arg, ci); 2205 } else { 2206 if (!list_empty(&ci->i_cap_flush_list)) { 2207 struct ceph_cap_flush *cf = 2208 list_last_entry(&ci->i_cap_flush_list, 2209 struct ceph_cap_flush, i_list); 2210 cf->wake = true; 2211 flush_tid = cf->tid; 2212 } 2213 flushing = ci->i_flushing_caps; 2214 spin_unlock(&ci->i_ceph_lock); 2215 } 2216 out: 2217 *ptid = flush_tid; 2218 return flushing; 2219 } 2220 2221 /* 2222 * Return true if we've flushed caps through the given flush_tid. 2223 */ 2224 static int caps_are_flushed(struct inode *inode, u64 flush_tid) 2225 { 2226 struct ceph_inode_info *ci = ceph_inode(inode); 2227 int ret = 1; 2228 2229 spin_lock(&ci->i_ceph_lock); 2230 if (!list_empty(&ci->i_cap_flush_list)) { 2231 struct ceph_cap_flush * cf = 2232 list_first_entry(&ci->i_cap_flush_list, 2233 struct ceph_cap_flush, i_list); 2234 if (cf->tid <= flush_tid) 2235 ret = 0; 2236 } 2237 spin_unlock(&ci->i_ceph_lock); 2238 return ret; 2239 } 2240 2241 /* 2242 * flush the mdlog and wait for any unsafe requests to complete. 2243 */ 2244 static int flush_mdlog_and_wait_inode_unsafe_requests(struct inode *inode) 2245 { 2246 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; 2247 struct ceph_inode_info *ci = ceph_inode(inode); 2248 struct ceph_mds_request *req1 = NULL, *req2 = NULL; 2249 int ret, err = 0; 2250 2251 spin_lock(&ci->i_unsafe_lock); 2252 if (S_ISDIR(inode->i_mode) && !list_empty(&ci->i_unsafe_dirops)) { 2253 req1 = list_last_entry(&ci->i_unsafe_dirops, 2254 struct ceph_mds_request, 2255 r_unsafe_dir_item); 2256 ceph_mdsc_get_request(req1); 2257 } 2258 if (!list_empty(&ci->i_unsafe_iops)) { 2259 req2 = list_last_entry(&ci->i_unsafe_iops, 2260 struct ceph_mds_request, 2261 r_unsafe_target_item); 2262 ceph_mdsc_get_request(req2); 2263 } 2264 spin_unlock(&ci->i_unsafe_lock); 2265 2266 /* 2267 * Trigger to flush the journal logs in all the relevant MDSes 2268 * manually, or in the worst case we must wait at most 5 seconds 2269 * to wait the journal logs to be flushed by the MDSes periodically. 
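 * Deduplicating by mds rank below (one session pointer per slot of the
 * sessions array) ensures each MDS is asked to flush its log exactly
 * once, so the wait is bounded by roughly one round trip rather than
 * the periodic journal flush interval.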
2270 */ 2271 if (req1 || req2) { 2272 struct ceph_mds_request *req; 2273 struct ceph_mds_session **sessions; 2274 struct ceph_mds_session *s; 2275 unsigned int max_sessions; 2276 int i; 2277 2278 mutex_lock(&mdsc->mutex); 2279 max_sessions = mdsc->max_sessions; 2280 2281 sessions = kcalloc(max_sessions, sizeof(s), GFP_KERNEL); 2282 if (!sessions) { 2283 mutex_unlock(&mdsc->mutex); 2284 err = -ENOMEM; 2285 goto out; 2286 } 2287 2288 spin_lock(&ci->i_unsafe_lock); 2289 if (req1) { 2290 list_for_each_entry(req, &ci->i_unsafe_dirops, 2291 r_unsafe_dir_item) { 2292 s = req->r_session; 2293 if (!s) 2294 continue; 2295 if (!sessions[s->s_mds]) { 2296 s = ceph_get_mds_session(s); 2297 sessions[s->s_mds] = s; 2298 } 2299 } 2300 } 2301 if (req2) { 2302 list_for_each_entry(req, &ci->i_unsafe_iops, 2303 r_unsafe_target_item) { 2304 s = req->r_session; 2305 if (!s) 2306 continue; 2307 if (!sessions[s->s_mds]) { 2308 s = ceph_get_mds_session(s); 2309 sessions[s->s_mds] = s; 2310 } 2311 } 2312 } 2313 spin_unlock(&ci->i_unsafe_lock); 2314 2315 /* the auth MDS */ 2316 spin_lock(&ci->i_ceph_lock); 2317 if (ci->i_auth_cap) { 2318 s = ci->i_auth_cap->session; 2319 if (!sessions[s->s_mds]) 2320 sessions[s->s_mds] = ceph_get_mds_session(s); 2321 } 2322 spin_unlock(&ci->i_ceph_lock); 2323 mutex_unlock(&mdsc->mutex); 2324 2325 /* send flush mdlog request to MDSes */ 2326 for (i = 0; i < max_sessions; i++) { 2327 s = sessions[i]; 2328 if (s) { 2329 send_flush_mdlog(s); 2330 ceph_put_mds_session(s); 2331 } 2332 } 2333 kfree(sessions); 2334 } 2335 2336 dout("%s %p wait on tid %llu %llu\n", __func__, 2337 inode, req1 ? req1->r_tid : 0ULL, req2 ? req2->r_tid : 0ULL); 2338 if (req1) { 2339 ret = !wait_for_completion_timeout(&req1->r_safe_completion, 2340 ceph_timeout_jiffies(req1->r_timeout)); 2341 if (ret) 2342 err = -EIO; 2343 } 2344 if (req2) { 2345 ret = !wait_for_completion_timeout(&req2->r_safe_completion, 2346 ceph_timeout_jiffies(req2->r_timeout)); 2347 if (ret) 2348 err = -EIO; 2349 } 2350 2351 out: 2352 if (req1) 2353 ceph_mdsc_put_request(req1); 2354 if (req2) 2355 ceph_mdsc_put_request(req2); 2356 return err; 2357 } 2358 2359 int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync) 2360 { 2361 struct inode *inode = file->f_mapping->host; 2362 struct ceph_inode_info *ci = ceph_inode(inode); 2363 u64 flush_tid; 2364 int ret, err; 2365 int dirty; 2366 2367 dout("fsync %p%s\n", inode, datasync ? " datasync" : ""); 2368 2369 ret = file_write_and_wait_range(file, start, end); 2370 if (datasync) 2371 goto out; 2372 2373 ret = ceph_wait_on_async_create(inode); 2374 if (ret) 2375 goto out; 2376 2377 dirty = try_flush_caps(inode, &flush_tid); 2378 dout("fsync dirty caps are %s\n", ceph_cap_string(dirty)); 2379 2380 err = flush_mdlog_and_wait_inode_unsafe_requests(inode); 2381 2382 /* 2383 * only wait on non-file metadata writeback (the mds 2384 * can recover size and mtime, so we don't need to 2385 * wait for that) 2386 */ 2387 if (!err && (dirty & ~CEPH_CAP_ANY_FILE_WR)) { 2388 err = wait_event_interruptible(ci->i_cap_wq, 2389 caps_are_flushed(inode, flush_tid)); 2390 } 2391 2392 if (err < 0) 2393 ret = err; 2394 2395 err = file_check_and_advance_wb_err(file); 2396 if (err < 0) 2397 ret = err; 2398 out: 2399 dout("fsync %p%s result=%d\n", inode, datasync ? " datasync" : "", ret); 2400 return ret; 2401 } 2402 2403 /* 2404 * Flush any dirty caps back to the mds. 
If we aren't asked to wait, 2405 * queue inode for flush but don't do so immediately, because we can 2406 * get by with fewer MDS messages if we wait for data writeback to 2407 * complete first. 2408 */ 2409 int ceph_write_inode(struct inode *inode, struct writeback_control *wbc) 2410 { 2411 struct ceph_inode_info *ci = ceph_inode(inode); 2412 u64 flush_tid; 2413 int err = 0; 2414 int dirty; 2415 int wait = (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync); 2416 2417 dout("write_inode %p wait=%d\n", inode, wait); 2418 ceph_fscache_unpin_writeback(inode, wbc); 2419 if (wait) { 2420 err = ceph_wait_on_async_create(inode); 2421 if (err) 2422 return err; 2423 dirty = try_flush_caps(inode, &flush_tid); 2424 if (dirty) 2425 err = wait_event_interruptible(ci->i_cap_wq, 2426 caps_are_flushed(inode, flush_tid)); 2427 } else { 2428 struct ceph_mds_client *mdsc = 2429 ceph_sb_to_client(inode->i_sb)->mdsc; 2430 2431 spin_lock(&ci->i_ceph_lock); 2432 if (__ceph_caps_dirty(ci)) 2433 __cap_delay_requeue_front(mdsc, ci); 2434 spin_unlock(&ci->i_ceph_lock); 2435 } 2436 return err; 2437 } 2438 2439 static void __kick_flushing_caps(struct ceph_mds_client *mdsc, 2440 struct ceph_mds_session *session, 2441 struct ceph_inode_info *ci, 2442 u64 oldest_flush_tid) 2443 __releases(ci->i_ceph_lock) 2444 __acquires(ci->i_ceph_lock) 2445 { 2446 struct inode *inode = &ci->netfs.inode; 2447 struct ceph_cap *cap; 2448 struct ceph_cap_flush *cf; 2449 int ret; 2450 u64 first_tid = 0; 2451 u64 last_snap_flush = 0; 2452 2453 /* Don't do anything until create reply comes in */ 2454 if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) 2455 return; 2456 2457 ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH; 2458 2459 list_for_each_entry_reverse(cf, &ci->i_cap_flush_list, i_list) { 2460 if (cf->is_capsnap) { 2461 last_snap_flush = cf->tid; 2462 break; 2463 } 2464 } 2465 2466 list_for_each_entry(cf, &ci->i_cap_flush_list, i_list) { 2467 if (cf->tid < first_tid) 2468 continue; 2469 2470 cap = ci->i_auth_cap; 2471 if (!(cap && cap->session == session)) { 2472 pr_err("%p auth cap %p not mds%d ???\n", 2473 inode, cap, session->s_mds); 2474 break; 2475 } 2476 2477 first_tid = cf->tid + 1; 2478 2479 if (!cf->is_capsnap) { 2480 struct cap_msg_args arg; 2481 2482 dout("kick_flushing_caps %p cap %p tid %llu %s\n", 2483 inode, cap, cf->tid, ceph_cap_string(cf->caps)); 2484 __prep_cap(&arg, cap, CEPH_CAP_OP_FLUSH, 2485 (cf->tid < last_snap_flush ? 
2486 				CEPH_CLIENT_CAPS_PENDING_CAPSNAP : 0),
2487 				   __ceph_caps_used(ci),
2488 				   __ceph_caps_wanted(ci),
2489 				   (cap->issued | cap->implemented),
2490 				   cf->caps, cf->tid, oldest_flush_tid);
2491 			spin_unlock(&ci->i_ceph_lock);
2492 			__send_cap(&arg, ci);
2493 		} else {
2494 			struct ceph_cap_snap *capsnap =
2495 					container_of(cf, struct ceph_cap_snap,
2496 						     cap_flush);
2497 			dout("kick_flushing_caps %p capsnap %p tid %llu %s\n",
2498 			     inode, capsnap, cf->tid,
2499 			     ceph_cap_string(capsnap->dirty));
2500 
2501 			refcount_inc(&capsnap->nref);
2502 			spin_unlock(&ci->i_ceph_lock);
2503 
2504 			ret = __send_flush_snap(inode, session, capsnap, cap->mseq,
2505 						oldest_flush_tid);
2506 			if (ret < 0) {
2507 				pr_err("kick_flushing_caps: error sending "
2508 				       "cap flushsnap, ino (%llx.%llx) "
2509 				       "tid %llu follows %llu\n",
2510 				       ceph_vinop(inode), cf->tid,
2511 				       capsnap->follows);
2512 			}
2513 
2514 			ceph_put_cap_snap(capsnap);
2515 		}
2516 
2517 		spin_lock(&ci->i_ceph_lock);
2518 	}
2519 }
2520 
2521 void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
2522 				   struct ceph_mds_session *session)
2523 {
2524 	struct ceph_inode_info *ci;
2525 	struct ceph_cap *cap;
2526 	u64 oldest_flush_tid;
2527 
2528 	dout("early_kick_flushing_caps mds%d\n", session->s_mds);
2529 
2530 	spin_lock(&mdsc->cap_dirty_lock);
2531 	oldest_flush_tid = __get_oldest_flush_tid(mdsc);
2532 	spin_unlock(&mdsc->cap_dirty_lock);
2533 
2534 	list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
2535 		spin_lock(&ci->i_ceph_lock);
2536 		cap = ci->i_auth_cap;
2537 		if (!(cap && cap->session == session)) {
2538 			pr_err("%p auth cap %p not mds%d ???\n",
2539 			       &ci->netfs.inode, cap, session->s_mds);
2540 			spin_unlock(&ci->i_ceph_lock);
2541 			continue;
2542 		}
2543 
2544 
2545 		/*
2546 		 * if flushing caps were revoked, we re-send the cap flush
2547 		 * in client reconnect stage. This guarantees the MDS processes
2548 		 * the cap flush message before issuing the flushing caps to
2549 		 * other clients.
2550 		 */
2551 		if ((cap->issued & ci->i_flushing_caps) !=
2552 		    ci->i_flushing_caps) {
2553 			/* encode_caps_cb() also will reset these sequence
2554 			 * numbers.
make sure sequence numbers in cap flush 2555 * message match later reconnect message */ 2556 cap->seq = 0; 2557 cap->issue_seq = 0; 2558 cap->mseq = 0; 2559 __kick_flushing_caps(mdsc, session, ci, 2560 oldest_flush_tid); 2561 } else { 2562 ci->i_ceph_flags |= CEPH_I_KICK_FLUSH; 2563 } 2564 2565 spin_unlock(&ci->i_ceph_lock); 2566 } 2567 } 2568 2569 void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc, 2570 struct ceph_mds_session *session) 2571 { 2572 struct ceph_inode_info *ci; 2573 struct ceph_cap *cap; 2574 u64 oldest_flush_tid; 2575 2576 lockdep_assert_held(&session->s_mutex); 2577 2578 dout("kick_flushing_caps mds%d\n", session->s_mds); 2579 2580 spin_lock(&mdsc->cap_dirty_lock); 2581 oldest_flush_tid = __get_oldest_flush_tid(mdsc); 2582 spin_unlock(&mdsc->cap_dirty_lock); 2583 2584 list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) { 2585 spin_lock(&ci->i_ceph_lock); 2586 cap = ci->i_auth_cap; 2587 if (!(cap && cap->session == session)) { 2588 pr_err("%p auth cap %p not mds%d ???\n", 2589 &ci->netfs.inode, cap, session->s_mds); 2590 spin_unlock(&ci->i_ceph_lock); 2591 continue; 2592 } 2593 if (ci->i_ceph_flags & CEPH_I_KICK_FLUSH) { 2594 __kick_flushing_caps(mdsc, session, ci, 2595 oldest_flush_tid); 2596 } 2597 spin_unlock(&ci->i_ceph_lock); 2598 } 2599 } 2600 2601 void ceph_kick_flushing_inode_caps(struct ceph_mds_session *session, 2602 struct ceph_inode_info *ci) 2603 { 2604 struct ceph_mds_client *mdsc = session->s_mdsc; 2605 struct ceph_cap *cap = ci->i_auth_cap; 2606 2607 lockdep_assert_held(&ci->i_ceph_lock); 2608 2609 dout("%s %p flushing %s\n", __func__, &ci->netfs.inode, 2610 ceph_cap_string(ci->i_flushing_caps)); 2611 2612 if (!list_empty(&ci->i_cap_flush_list)) { 2613 u64 oldest_flush_tid; 2614 spin_lock(&mdsc->cap_dirty_lock); 2615 list_move_tail(&ci->i_flushing_item, 2616 &cap->session->s_cap_flushing); 2617 oldest_flush_tid = __get_oldest_flush_tid(mdsc); 2618 spin_unlock(&mdsc->cap_dirty_lock); 2619 2620 __kick_flushing_caps(mdsc, session, ci, oldest_flush_tid); 2621 } 2622 } 2623 2624 2625 /* 2626 * Take references to capabilities we hold, so that we don't release 2627 * them to the MDS prematurely. 2628 */ 2629 void ceph_take_cap_refs(struct ceph_inode_info *ci, int got, 2630 bool snap_rwsem_locked) 2631 { 2632 lockdep_assert_held(&ci->i_ceph_lock); 2633 2634 if (got & CEPH_CAP_PIN) 2635 ci->i_pin_ref++; 2636 if (got & CEPH_CAP_FILE_RD) 2637 ci->i_rd_ref++; 2638 if (got & CEPH_CAP_FILE_CACHE) 2639 ci->i_rdcache_ref++; 2640 if (got & CEPH_CAP_FILE_EXCL) 2641 ci->i_fx_ref++; 2642 if (got & CEPH_CAP_FILE_WR) { 2643 if (ci->i_wr_ref == 0 && !ci->i_head_snapc) { 2644 BUG_ON(!snap_rwsem_locked); 2645 ci->i_head_snapc = ceph_get_snap_context( 2646 ci->i_snap_realm->cached_context); 2647 } 2648 ci->i_wr_ref++; 2649 } 2650 if (got & CEPH_CAP_FILE_BUFFER) { 2651 if (ci->i_wb_ref == 0) 2652 ihold(&ci->netfs.inode); 2653 ci->i_wb_ref++; 2654 dout("%s %p wb %d -> %d (?)\n", __func__, 2655 &ci->netfs.inode, ci->i_wb_ref-1, ci->i_wb_ref); 2656 } 2657 } 2658 2659 /* 2660 * Try to grab cap references. Specify those refs we @want, and the 2661 * minimal set we @need. Also include the larger offset we are writing 2662 * to (when applicable), and check against max_size here as well. 2663 * Note that caller is responsible for ensuring max_size increases are 2664 * requested from the MDS. 2665 * 2666 * Returns 0 if caps were not able to be acquired (yet), 1 if succeed, 2667 * or a negative error code. 
There are 3 special error codes:
2668  * -EAGAIN: need to sleep but non-blocking is specified
2669  * -EFBIG: ask caller to call check_max_size() and try again.
2670  * -EUCLEAN: ask caller to call ceph_renew_caps() and try again.
2671  */
2672 enum {
2673 	/* first 8 bits are reserved for CEPH_FILE_MODE_FOO */
2674 	NON_BLOCKING	= (1 << 8),
2675 	CHECK_FILELOCK	= (1 << 9),
2676 };
2677 
2678 static int try_get_cap_refs(struct inode *inode, int need, int want,
2679 			    loff_t endoff, int flags, int *got)
2680 {
2681 	struct ceph_inode_info *ci = ceph_inode(inode);
2682 	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
2683 	int ret = 0;
2684 	int have, implemented;
2685 	bool snap_rwsem_locked = false;
2686 
2687 	dout("get_cap_refs %p need %s want %s\n", inode,
2688 	     ceph_cap_string(need), ceph_cap_string(want));
2689 
2690 again:
2691 	spin_lock(&ci->i_ceph_lock);
2692 
2693 	if ((flags & CHECK_FILELOCK) &&
2694 	    (ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK)) {
2695 		dout("try_get_cap_refs %p error filelock\n", inode);
2696 		ret = -EIO;
2697 		goto out_unlock;
2698 	}
2699 
2700 	/* finish pending truncate */
2701 	while (ci->i_truncate_pending) {
2702 		spin_unlock(&ci->i_ceph_lock);
2703 		if (snap_rwsem_locked) {
2704 			up_read(&mdsc->snap_rwsem);
2705 			snap_rwsem_locked = false;
2706 		}
2707 		__ceph_do_pending_vmtruncate(inode);
2708 		spin_lock(&ci->i_ceph_lock);
2709 	}
2710 
2711 	have = __ceph_caps_issued(ci, &implemented);
2712 
2713 	if (have & need & CEPH_CAP_FILE_WR) {
2714 		if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) {
2715 			dout("get_cap_refs %p endoff %llu > maxsize %llu\n",
2716 			     inode, endoff, ci->i_max_size);
2717 			if (endoff > ci->i_requested_max_size)
2718 				ret = ci->i_auth_cap ? -EFBIG : -EUCLEAN;
2719 			goto out_unlock;
2720 		}
2721 		/*
2722 		 * If a sync write is in progress, we must wait, so that we
2723 		 * can get a final snapshot value for size+mtime.
2724 		 */
2725 		if (__ceph_have_pending_cap_snap(ci)) {
2726 			dout("get_cap_refs %p cap_snap_pending\n", inode);
2727 			goto out_unlock;
2728 		}
2729 	}
2730 
2731 	if ((have & need) == need) {
2732 		/*
2733 		 * Look at (implemented & ~have & not) so that we keep waiting
2734 		 * on transition from wanted -> needed caps. This is needed
2735 		 * for WRBUFFER|WR -> WR to avoid a new WR sync write going
2736 		 * ahead of a prior buffered writeback.
2737 		 *
2738 		 * For RDCACHE|RD -> RD, there is no need to wait and we can
2739 		 * just exclude the revoking caps and force a sync read.
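 * A worked example with illustrative values: need = Fw, want = Fw|Fb,
 * have = Fw, implemented = Fw|Fb (Fb is being revoked). Then
 * not = Fb, revoking = Fb, exclude = Fb; since exclude contains
 * CEPH_CAP_FILE_BUFFER we do not take the refs, and the new sync
 * write waits until the buffered writeback drains.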
2740 */ 2741 int not = want & ~(have & need); 2742 int revoking = implemented & ~have; 2743 int exclude = revoking & not; 2744 dout("get_cap_refs %p have %s but not %s (revoking %s)\n", 2745 inode, ceph_cap_string(have), ceph_cap_string(not), 2746 ceph_cap_string(revoking)); 2747 if (!exclude || !(exclude & CEPH_CAP_FILE_BUFFER)) { 2748 if (!snap_rwsem_locked && 2749 !ci->i_head_snapc && 2750 (need & CEPH_CAP_FILE_WR)) { 2751 if (!down_read_trylock(&mdsc->snap_rwsem)) { 2752 /* 2753 * we can not call down_read() when 2754 * task isn't in TASK_RUNNING state 2755 */ 2756 if (flags & NON_BLOCKING) { 2757 ret = -EAGAIN; 2758 goto out_unlock; 2759 } 2760 2761 spin_unlock(&ci->i_ceph_lock); 2762 down_read(&mdsc->snap_rwsem); 2763 snap_rwsem_locked = true; 2764 goto again; 2765 } 2766 snap_rwsem_locked = true; 2767 } 2768 if ((have & want) == want) 2769 *got = need | (want & ~exclude); 2770 else 2771 *got = need; 2772 ceph_take_cap_refs(ci, *got, true); 2773 ret = 1; 2774 } 2775 } else { 2776 int session_readonly = false; 2777 int mds_wanted; 2778 if (ci->i_auth_cap && 2779 (need & (CEPH_CAP_FILE_WR | CEPH_CAP_FILE_EXCL))) { 2780 struct ceph_mds_session *s = ci->i_auth_cap->session; 2781 spin_lock(&s->s_cap_lock); 2782 session_readonly = s->s_readonly; 2783 spin_unlock(&s->s_cap_lock); 2784 } 2785 if (session_readonly) { 2786 dout("get_cap_refs %p need %s but mds%d readonly\n", 2787 inode, ceph_cap_string(need), ci->i_auth_cap->mds); 2788 ret = -EROFS; 2789 goto out_unlock; 2790 } 2791 2792 if (ceph_inode_is_shutdown(inode)) { 2793 dout("get_cap_refs %p inode is shutdown\n", inode); 2794 ret = -ESTALE; 2795 goto out_unlock; 2796 } 2797 mds_wanted = __ceph_caps_mds_wanted(ci, false); 2798 if (need & ~mds_wanted) { 2799 dout("get_cap_refs %p need %s > mds_wanted %s\n", 2800 inode, ceph_cap_string(need), 2801 ceph_cap_string(mds_wanted)); 2802 ret = -EUCLEAN; 2803 goto out_unlock; 2804 } 2805 2806 dout("get_cap_refs %p have %s need %s\n", inode, 2807 ceph_cap_string(have), ceph_cap_string(need)); 2808 } 2809 out_unlock: 2810 2811 __ceph_touch_fmode(ci, mdsc, flags); 2812 2813 spin_unlock(&ci->i_ceph_lock); 2814 if (snap_rwsem_locked) 2815 up_read(&mdsc->snap_rwsem); 2816 2817 if (!ret) 2818 ceph_update_cap_mis(&mdsc->metric); 2819 else if (ret == 1) 2820 ceph_update_cap_hit(&mdsc->metric); 2821 2822 dout("get_cap_refs %p ret %d got %s\n", inode, 2823 ret, ceph_cap_string(*got)); 2824 return ret; 2825 } 2826 2827 /* 2828 * Check the offset we are writing up to against our current 2829 * max_size. If necessary, tell the MDS we want to write to 2830 * a larger offset. 2831 */ 2832 static void check_max_size(struct inode *inode, loff_t endoff) 2833 { 2834 struct ceph_inode_info *ci = ceph_inode(inode); 2835 int check = 0; 2836 2837 /* do we need to explicitly request a larger max_size? 
*/ 2838 spin_lock(&ci->i_ceph_lock); 2839 if (endoff >= ci->i_max_size && endoff > ci->i_wanted_max_size) { 2840 dout("write %p at large endoff %llu, req max_size\n", 2841 inode, endoff); 2842 ci->i_wanted_max_size = endoff; 2843 } 2844 /* duplicate ceph_check_caps()'s logic */ 2845 if (ci->i_auth_cap && 2846 (ci->i_auth_cap->issued & CEPH_CAP_FILE_WR) && 2847 ci->i_wanted_max_size > ci->i_max_size && 2848 ci->i_wanted_max_size > ci->i_requested_max_size) 2849 check = 1; 2850 spin_unlock(&ci->i_ceph_lock); 2851 if (check) 2852 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY); 2853 } 2854 2855 static inline int get_used_fmode(int caps) 2856 { 2857 int fmode = 0; 2858 if (caps & CEPH_CAP_FILE_RD) 2859 fmode |= CEPH_FILE_MODE_RD; 2860 if (caps & CEPH_CAP_FILE_WR) 2861 fmode |= CEPH_FILE_MODE_WR; 2862 return fmode; 2863 } 2864 2865 int ceph_try_get_caps(struct inode *inode, int need, int want, 2866 bool nonblock, int *got) 2867 { 2868 int ret, flags; 2869 2870 BUG_ON(need & ~CEPH_CAP_FILE_RD); 2871 BUG_ON(want & ~(CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO | 2872 CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL | 2873 CEPH_CAP_ANY_DIR_OPS)); 2874 if (need) { 2875 ret = ceph_pool_perm_check(inode, need); 2876 if (ret < 0) 2877 return ret; 2878 } 2879 2880 flags = get_used_fmode(need | want); 2881 if (nonblock) 2882 flags |= NON_BLOCKING; 2883 2884 ret = try_get_cap_refs(inode, need, want, 0, flags, got); 2885 /* three special error codes */ 2886 if (ret == -EAGAIN || ret == -EFBIG || ret == -EUCLEAN) 2887 ret = 0; 2888 return ret; 2889 } 2890 2891 /* 2892 * Wait for caps, and take cap references. If we can't get a WR cap 2893 * due to a small max_size, make sure we check_max_size (and possibly 2894 * ask the mds) so we don't get hung up indefinitely. 2895 */ 2896 int ceph_get_caps(struct file *filp, int need, int want, loff_t endoff, int *got) 2897 { 2898 struct ceph_file_info *fi = filp->private_data; 2899 struct inode *inode = file_inode(filp); 2900 struct ceph_inode_info *ci = ceph_inode(inode); 2901 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 2902 int ret, _got, flags; 2903 2904 ret = ceph_pool_perm_check(inode, need); 2905 if (ret < 0) 2906 return ret; 2907 2908 if ((fi->fmode & CEPH_FILE_MODE_WR) && 2909 fi->filp_gen != READ_ONCE(fsc->filp_gen)) 2910 return -EBADF; 2911 2912 flags = get_used_fmode(need | want); 2913 2914 while (true) { 2915 flags &= CEPH_FILE_MODE_MASK; 2916 if (atomic_read(&fi->num_locks)) 2917 flags |= CHECK_FILELOCK; 2918 _got = 0; 2919 ret = try_get_cap_refs(inode, need, want, endoff, 2920 flags, &_got); 2921 WARN_ON_ONCE(ret == -EAGAIN); 2922 if (!ret) { 2923 struct ceph_mds_client *mdsc = fsc->mdsc; 2924 struct cap_wait cw; 2925 DEFINE_WAIT_FUNC(wait, woken_wake_function); 2926 2927 cw.ino = ceph_ino(inode); 2928 cw.tgid = current->tgid; 2929 cw.need = need; 2930 cw.want = want; 2931 2932 spin_lock(&mdsc->caps_list_lock); 2933 list_add(&cw.list, &mdsc->cap_wait_list); 2934 spin_unlock(&mdsc->caps_list_lock); 2935 2936 /* make sure used fmode not timeout */ 2937 ceph_get_fmode(ci, flags, FMODE_WAIT_BIAS); 2938 add_wait_queue(&ci->i_cap_wq, &wait); 2939 2940 flags |= NON_BLOCKING; 2941 while (!(ret = try_get_cap_refs(inode, need, want, 2942 endoff, flags, &_got))) { 2943 if (signal_pending(current)) { 2944 ret = -ERESTARTSYS; 2945 break; 2946 } 2947 wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); 2948 } 2949 2950 remove_wait_queue(&ci->i_cap_wq, &wait); 2951 ceph_put_fmode(ci, flags, FMODE_WAIT_BIAS); 2952 2953 spin_lock(&mdsc->caps_list_lock); 2954 
list_del(&cw.list); 2955 spin_unlock(&mdsc->caps_list_lock); 2956 2957 if (ret == -EAGAIN) 2958 continue; 2959 } 2960 2961 if ((fi->fmode & CEPH_FILE_MODE_WR) && 2962 fi->filp_gen != READ_ONCE(fsc->filp_gen)) { 2963 if (ret >= 0 && _got) 2964 ceph_put_cap_refs(ci, _got); 2965 return -EBADF; 2966 } 2967 2968 if (ret < 0) { 2969 if (ret == -EFBIG || ret == -EUCLEAN) { 2970 int ret2 = ceph_wait_on_async_create(inode); 2971 if (ret2 < 0) 2972 return ret2; 2973 } 2974 if (ret == -EFBIG) { 2975 check_max_size(inode, endoff); 2976 continue; 2977 } 2978 if (ret == -EUCLEAN) { 2979 /* session was killed, try renew caps */ 2980 ret = ceph_renew_caps(inode, flags); 2981 if (ret == 0) 2982 continue; 2983 } 2984 return ret; 2985 } 2986 2987 if (S_ISREG(ci->netfs.inode.i_mode) && 2988 ceph_has_inline_data(ci) && 2989 (_got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) && 2990 i_size_read(inode) > 0) { 2991 struct page *page = 2992 find_get_page(inode->i_mapping, 0); 2993 if (page) { 2994 bool uptodate = PageUptodate(page); 2995 2996 put_page(page); 2997 if (uptodate) 2998 break; 2999 } 3000 /* 3001 * drop cap refs first because getattr while 3002 * holding * caps refs can cause deadlock. 3003 */ 3004 ceph_put_cap_refs(ci, _got); 3005 _got = 0; 3006 3007 /* 3008 * getattr request will bring inline data into 3009 * page cache 3010 */ 3011 ret = __ceph_do_getattr(inode, NULL, 3012 CEPH_STAT_CAP_INLINE_DATA, 3013 true); 3014 if (ret < 0) 3015 return ret; 3016 continue; 3017 } 3018 break; 3019 } 3020 *got = _got; 3021 return 0; 3022 } 3023 3024 /* 3025 * Take cap refs. Caller must already know we hold at least one ref 3026 * on the caps in question or we don't know this is safe. 3027 */ 3028 void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps) 3029 { 3030 spin_lock(&ci->i_ceph_lock); 3031 ceph_take_cap_refs(ci, caps, false); 3032 spin_unlock(&ci->i_ceph_lock); 3033 } 3034 3035 3036 /* 3037 * drop cap_snap that is not associated with any snapshot. 3038 * we don't need to send FLUSHSNAP message for it. 3039 */ 3040 static int ceph_try_drop_cap_snap(struct ceph_inode_info *ci, 3041 struct ceph_cap_snap *capsnap) 3042 { 3043 if (!capsnap->need_flush && 3044 !capsnap->writing && !capsnap->dirty_pages) { 3045 dout("dropping cap_snap %p follows %llu\n", 3046 capsnap, capsnap->follows); 3047 BUG_ON(capsnap->cap_flush.tid > 0); 3048 ceph_put_snap_context(capsnap->context); 3049 if (!list_is_last(&capsnap->ci_item, &ci->i_cap_snaps)) 3050 ci->i_ceph_flags |= CEPH_I_FLUSH_SNAPS; 3051 3052 list_del(&capsnap->ci_item); 3053 ceph_put_cap_snap(capsnap); 3054 return 1; 3055 } 3056 return 0; 3057 } 3058 3059 enum put_cap_refs_mode { 3060 PUT_CAP_REFS_SYNC = 0, 3061 PUT_CAP_REFS_NO_CHECK, 3062 PUT_CAP_REFS_ASYNC, 3063 }; 3064 3065 /* 3066 * Release cap refs. 3067 * 3068 * If we released the last ref on any given cap, call ceph_check_caps 3069 * to release (or schedule a release). 3070 * 3071 * If we are releasing a WR cap (from a sync write), finalize any affected 3072 * cap_snap, and wake up any waiters. 
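 * Every successful ceph_get_caps()/ceph_take_cap_refs() must be paired
 * with a put of the same cap set. A sketch of a typical write-path
 * pairing (variables illustrative, error handling omitted):
 *
 *	ret = ceph_get_caps(filp, CEPH_CAP_FILE_WR,
 *			    CEPH_CAP_FILE_BUFFER, endoff, &got);
 *	if (ret == 0) {
 *		... do the I/O ...
 *		ceph_put_cap_refs(ci, got);
 *	}
 *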
3073 */ 3074 static void __ceph_put_cap_refs(struct ceph_inode_info *ci, int had, 3075 enum put_cap_refs_mode mode) 3076 { 3077 struct inode *inode = &ci->netfs.inode; 3078 int last = 0, put = 0, flushsnaps = 0, wake = 0; 3079 bool check_flushsnaps = false; 3080 3081 spin_lock(&ci->i_ceph_lock); 3082 if (had & CEPH_CAP_PIN) 3083 --ci->i_pin_ref; 3084 if (had & CEPH_CAP_FILE_RD) 3085 if (--ci->i_rd_ref == 0) 3086 last++; 3087 if (had & CEPH_CAP_FILE_CACHE) 3088 if (--ci->i_rdcache_ref == 0) 3089 last++; 3090 if (had & CEPH_CAP_FILE_EXCL) 3091 if (--ci->i_fx_ref == 0) 3092 last++; 3093 if (had & CEPH_CAP_FILE_BUFFER) { 3094 if (--ci->i_wb_ref == 0) { 3095 last++; 3096 /* put the ref held by ceph_take_cap_refs() */ 3097 put++; 3098 check_flushsnaps = true; 3099 } 3100 dout("put_cap_refs %p wb %d -> %d (?)\n", 3101 inode, ci->i_wb_ref+1, ci->i_wb_ref); 3102 } 3103 if (had & CEPH_CAP_FILE_WR) { 3104 if (--ci->i_wr_ref == 0) { 3105 last++; 3106 check_flushsnaps = true; 3107 if (ci->i_wrbuffer_ref_head == 0 && 3108 ci->i_dirty_caps == 0 && 3109 ci->i_flushing_caps == 0) { 3110 BUG_ON(!ci->i_head_snapc); 3111 ceph_put_snap_context(ci->i_head_snapc); 3112 ci->i_head_snapc = NULL; 3113 } 3114 /* see comment in __ceph_remove_cap() */ 3115 if (!__ceph_is_any_real_caps(ci) && ci->i_snap_realm) 3116 ceph_change_snap_realm(inode, NULL); 3117 } 3118 } 3119 if (check_flushsnaps && __ceph_have_pending_cap_snap(ci)) { 3120 struct ceph_cap_snap *capsnap = 3121 list_last_entry(&ci->i_cap_snaps, 3122 struct ceph_cap_snap, 3123 ci_item); 3124 3125 capsnap->writing = 0; 3126 if (ceph_try_drop_cap_snap(ci, capsnap)) 3127 /* put the ref held by ceph_queue_cap_snap() */ 3128 put++; 3129 else if (__ceph_finish_cap_snap(ci, capsnap)) 3130 flushsnaps = 1; 3131 wake = 1; 3132 } 3133 spin_unlock(&ci->i_ceph_lock); 3134 3135 dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had), 3136 last ? " last" : "", put ? " put" : ""); 3137 3138 switch (mode) { 3139 case PUT_CAP_REFS_SYNC: 3140 if (last) 3141 ceph_check_caps(ci, 0); 3142 else if (flushsnaps) 3143 ceph_flush_snaps(ci, NULL); 3144 break; 3145 case PUT_CAP_REFS_ASYNC: 3146 if (last) 3147 ceph_queue_check_caps(inode); 3148 else if (flushsnaps) 3149 ceph_queue_flush_snaps(inode); 3150 break; 3151 default: 3152 break; 3153 } 3154 if (wake) 3155 wake_up_all(&ci->i_cap_wq); 3156 while (put-- > 0) 3157 iput(inode); 3158 } 3159 3160 void ceph_put_cap_refs(struct ceph_inode_info *ci, int had) 3161 { 3162 __ceph_put_cap_refs(ci, had, PUT_CAP_REFS_SYNC); 3163 } 3164 3165 void ceph_put_cap_refs_async(struct ceph_inode_info *ci, int had) 3166 { 3167 __ceph_put_cap_refs(ci, had, PUT_CAP_REFS_ASYNC); 3168 } 3169 3170 void ceph_put_cap_refs_no_check_caps(struct ceph_inode_info *ci, int had) 3171 { 3172 __ceph_put_cap_refs(ci, had, PUT_CAP_REFS_NO_CHECK); 3173 } 3174 3175 /* 3176 * Release @nr WRBUFFER refs on dirty pages for the given @snapc snap 3177 * context. Adjust per-snap dirty page accounting as appropriate. 3178 * Once all dirty data for a cap_snap is flushed, flush snapped file 3179 * metadata back to the MDS. If we dropped the last ref, call 3180 * ceph_check_caps. 
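 * The matching references are taken as pages are dirtied against the
 * given snap context in the writeback path; @nr is the number of pages
 * whose writeback (or invalidation) just completed for that context.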
3181 */ 3182 void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr, 3183 struct ceph_snap_context *snapc) 3184 { 3185 struct inode *inode = &ci->netfs.inode; 3186 struct ceph_cap_snap *capsnap = NULL, *iter; 3187 int put = 0; 3188 bool last = false; 3189 bool flush_snaps = false; 3190 bool complete_capsnap = false; 3191 3192 spin_lock(&ci->i_ceph_lock); 3193 ci->i_wrbuffer_ref -= nr; 3194 if (ci->i_wrbuffer_ref == 0) { 3195 last = true; 3196 put++; 3197 } 3198 3199 if (ci->i_head_snapc == snapc) { 3200 ci->i_wrbuffer_ref_head -= nr; 3201 if (ci->i_wrbuffer_ref_head == 0 && 3202 ci->i_wr_ref == 0 && 3203 ci->i_dirty_caps == 0 && 3204 ci->i_flushing_caps == 0) { 3205 BUG_ON(!ci->i_head_snapc); 3206 ceph_put_snap_context(ci->i_head_snapc); 3207 ci->i_head_snapc = NULL; 3208 } 3209 dout("put_wrbuffer_cap_refs on %p head %d/%d -> %d/%d %s\n", 3210 inode, 3211 ci->i_wrbuffer_ref+nr, ci->i_wrbuffer_ref_head+nr, 3212 ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head, 3213 last ? " LAST" : ""); 3214 } else { 3215 list_for_each_entry(iter, &ci->i_cap_snaps, ci_item) { 3216 if (iter->context == snapc) { 3217 capsnap = iter; 3218 break; 3219 } 3220 } 3221 3222 if (!capsnap) { 3223 /* 3224 * The capsnap should already be removed when removing 3225 * auth cap in the case of a forced unmount. 3226 */ 3227 WARN_ON_ONCE(ci->i_auth_cap); 3228 goto unlock; 3229 } 3230 3231 capsnap->dirty_pages -= nr; 3232 if (capsnap->dirty_pages == 0) { 3233 complete_capsnap = true; 3234 if (!capsnap->writing) { 3235 if (ceph_try_drop_cap_snap(ci, capsnap)) { 3236 put++; 3237 } else { 3238 ci->i_ceph_flags |= CEPH_I_FLUSH_SNAPS; 3239 flush_snaps = true; 3240 } 3241 } 3242 } 3243 dout("put_wrbuffer_cap_refs on %p cap_snap %p " 3244 " snap %lld %d/%d -> %d/%d %s%s\n", 3245 inode, capsnap, capsnap->context->seq, 3246 ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr, 3247 ci->i_wrbuffer_ref, capsnap->dirty_pages, 3248 last ? " (wrbuffer last)" : "", 3249 complete_capsnap ? " (complete capsnap)" : ""); 3250 } 3251 3252 unlock: 3253 spin_unlock(&ci->i_ceph_lock); 3254 3255 if (last) { 3256 ceph_check_caps(ci, 0); 3257 } else if (flush_snaps) { 3258 ceph_flush_snaps(ci, NULL); 3259 } 3260 if (complete_capsnap) 3261 wake_up_all(&ci->i_cap_wq); 3262 while (put-- > 0) { 3263 iput(inode); 3264 } 3265 } 3266 3267 /* 3268 * Invalidate unlinked inode's aliases, so we can drop the inode ASAP. 3269 */ 3270 static void invalidate_aliases(struct inode *inode) 3271 { 3272 struct dentry *dn, *prev = NULL; 3273 3274 dout("invalidate_aliases inode %p\n", inode); 3275 d_prune_aliases(inode); 3276 /* 3277 * For non-directory inode, d_find_alias() only returns 3278 * hashed dentry. After calling d_invalidate(), the 3279 * dentry becomes unhashed. 3280 * 3281 * For directory inode, d_find_alias() can return 3282 * unhashed dentry. But directory inode should have 3283 * one alias at most. 3284 */ 3285 while ((dn = d_find_alias(inode))) { 3286 if (dn == prev) { 3287 dput(dn); 3288 break; 3289 } 3290 d_invalidate(dn); 3291 if (prev) 3292 dput(prev); 3293 prev = dn; 3294 } 3295 if (prev) 3296 dput(prev); 3297 } 3298 3299 struct cap_extra_info { 3300 struct ceph_string *pool_ns; 3301 /* inline data */ 3302 u64 inline_version; 3303 void *inline_data; 3304 u32 inline_len; 3305 /* dirstat */ 3306 bool dirstat_valid; 3307 u64 nfiles; 3308 u64 nsubdirs; 3309 u64 change_attr; 3310 /* currently issued */ 3311 int issued; 3312 struct timespec64 btime; 3313 }; 3314 3315 /* 3316 * Handle a cap GRANT message from the MDS. 
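 * A grant carries the complete new issued set rather than a delta; the
 * client diffs it against cap->issued below to decide what changed.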
(Note that a GRANT may 3317 * actually be a revocation if it specifies a smaller cap set.) 3318 * 3319 * caller holds s_mutex and i_ceph_lock, we drop both. 3320 */ 3321 static void handle_cap_grant(struct inode *inode, 3322 struct ceph_mds_session *session, 3323 struct ceph_cap *cap, 3324 struct ceph_mds_caps *grant, 3325 struct ceph_buffer *xattr_buf, 3326 struct cap_extra_info *extra_info) 3327 __releases(ci->i_ceph_lock) 3328 __releases(session->s_mdsc->snap_rwsem) 3329 { 3330 struct ceph_inode_info *ci = ceph_inode(inode); 3331 int seq = le32_to_cpu(grant->seq); 3332 int newcaps = le32_to_cpu(grant->caps); 3333 int used, wanted, dirty; 3334 u64 size = le64_to_cpu(grant->size); 3335 u64 max_size = le64_to_cpu(grant->max_size); 3336 unsigned char check_caps = 0; 3337 bool was_stale = cap->cap_gen < atomic_read(&session->s_cap_gen); 3338 bool wake = false; 3339 bool writeback = false; 3340 bool queue_trunc = false; 3341 bool queue_invalidate = false; 3342 bool deleted_inode = false; 3343 bool fill_inline = false; 3344 3345 dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n", 3346 inode, cap, session->s_mds, seq, ceph_cap_string(newcaps)); 3347 dout(" size %llu max_size %llu, i_size %llu\n", size, max_size, 3348 i_size_read(inode)); 3349 3350 3351 /* 3352 * If CACHE is being revoked, and we have no dirty buffers, 3353 * try to invalidate (once). (If there are dirty buffers, we 3354 * will invalidate _after_ writeback.) 3355 */ 3356 if (S_ISREG(inode->i_mode) && /* don't invalidate readdir cache */ 3357 ((cap->issued & ~newcaps) & CEPH_CAP_FILE_CACHE) && 3358 (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 && 3359 !(ci->i_wrbuffer_ref || ci->i_wb_ref)) { 3360 if (try_nonblocking_invalidate(inode)) { 3361 /* there were locked pages.. invalidate later 3362 in a separate thread. */ 3363 if (ci->i_rdcache_revoking != ci->i_rdcache_gen) { 3364 queue_invalidate = true; 3365 ci->i_rdcache_revoking = ci->i_rdcache_gen; 3366 } 3367 } 3368 } 3369 3370 if (was_stale) 3371 cap->issued = cap->implemented = CEPH_CAP_PIN; 3372 3373 /* 3374 * auth mds of the inode changed. we received the cap export message, 3375 * but still haven't received the cap import message. handle_cap_export 3376 * updated the new auth MDS' cap. 3377 * 3378 * "ceph_seq_cmp(seq, cap->seq) <= 0" means we are processing a message 3379 * that was sent before the cap import message. So don't remove caps. 3380 */ 3381 if (ceph_seq_cmp(seq, cap->seq) <= 0) { 3382 WARN_ON(cap != ci->i_auth_cap); 3383 WARN_ON(cap->cap_id != le64_to_cpu(grant->cap_id)); 3384 seq = cap->seq; 3385 newcaps |= cap->issued; 3386 } 3387 3388 /* side effects now are allowed */ 3389 cap->cap_gen = atomic_read(&session->s_cap_gen); 3390 cap->seq = seq; 3391 3392 __check_cap_issue(ci, cap, newcaps); 3393 3394 inode_set_max_iversion_raw(inode, extra_info->change_attr); 3395 3396 if ((newcaps & CEPH_CAP_AUTH_SHARED) && 3397 (extra_info->issued & CEPH_CAP_AUTH_EXCL) == 0) { 3398 umode_t mode = le32_to_cpu(grant->mode); 3399 3400 if (inode_wrong_type(inode, mode)) 3401 pr_warn_once("inode type changed! 
(ino %llx.%llx is 0%o, mds says 0%o)\n", 3402 ceph_vinop(inode), inode->i_mode, mode); 3403 else 3404 inode->i_mode = mode; 3405 inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(grant->uid)); 3406 inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(grant->gid)); 3407 ci->i_btime = extra_info->btime; 3408 dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode, 3409 from_kuid(&init_user_ns, inode->i_uid), 3410 from_kgid(&init_user_ns, inode->i_gid)); 3411 } 3412 3413 if ((newcaps & CEPH_CAP_LINK_SHARED) && 3414 (extra_info->issued & CEPH_CAP_LINK_EXCL) == 0) { 3415 set_nlink(inode, le32_to_cpu(grant->nlink)); 3416 if (inode->i_nlink == 0) 3417 deleted_inode = true; 3418 } 3419 3420 if ((extra_info->issued & CEPH_CAP_XATTR_EXCL) == 0 && 3421 grant->xattr_len) { 3422 int len = le32_to_cpu(grant->xattr_len); 3423 u64 version = le64_to_cpu(grant->xattr_version); 3424 3425 if (version > ci->i_xattrs.version) { 3426 dout(" got new xattrs v%llu on %p len %d\n", 3427 version, inode, len); 3428 if (ci->i_xattrs.blob) 3429 ceph_buffer_put(ci->i_xattrs.blob); 3430 ci->i_xattrs.blob = ceph_buffer_get(xattr_buf); 3431 ci->i_xattrs.version = version; 3432 ceph_forget_all_cached_acls(inode); 3433 ceph_security_invalidate_secctx(inode); 3434 } 3435 } 3436 3437 if (newcaps & CEPH_CAP_ANY_RD) { 3438 struct timespec64 mtime, atime, ctime; 3439 /* ctime/mtime/atime? */ 3440 ceph_decode_timespec64(&mtime, &grant->mtime); 3441 ceph_decode_timespec64(&atime, &grant->atime); 3442 ceph_decode_timespec64(&ctime, &grant->ctime); 3443 ceph_fill_file_time(inode, extra_info->issued, 3444 le32_to_cpu(grant->time_warp_seq), 3445 &ctime, &mtime, &atime); 3446 } 3447 3448 if ((newcaps & CEPH_CAP_FILE_SHARED) && extra_info->dirstat_valid) { 3449 ci->i_files = extra_info->nfiles; 3450 ci->i_subdirs = extra_info->nsubdirs; 3451 } 3452 3453 if (newcaps & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR)) { 3454 /* file layout may have changed */ 3455 s64 old_pool = ci->i_layout.pool_id; 3456 struct ceph_string *old_ns; 3457 3458 ceph_file_layout_from_legacy(&ci->i_layout, &grant->layout); 3459 old_ns = rcu_dereference_protected(ci->i_layout.pool_ns, 3460 lockdep_is_held(&ci->i_ceph_lock)); 3461 rcu_assign_pointer(ci->i_layout.pool_ns, extra_info->pool_ns); 3462 3463 if (ci->i_layout.pool_id != old_pool || 3464 extra_info->pool_ns != old_ns) 3465 ci->i_ceph_flags &= ~CEPH_I_POOL_PERM; 3466 3467 extra_info->pool_ns = old_ns; 3468 3469 /* size/truncate_seq? */ 3470 queue_trunc = ceph_fill_file_size(inode, extra_info->issued, 3471 le32_to_cpu(grant->truncate_seq), 3472 le64_to_cpu(grant->truncate_size), 3473 size); 3474 } 3475 3476 if (ci->i_auth_cap == cap && (newcaps & CEPH_CAP_ANY_FILE_WR)) { 3477 if (max_size != ci->i_max_size) { 3478 dout("max_size %lld -> %llu\n", 3479 ci->i_max_size, max_size); 3480 ci->i_max_size = max_size; 3481 if (max_size >= ci->i_wanted_max_size) { 3482 ci->i_wanted_max_size = 0; /* reset */ 3483 ci->i_requested_max_size = 0; 3484 } 3485 wake = true; 3486 } 3487 } 3488 3489 /* check cap bits */ 3490 wanted = __ceph_caps_wanted(ci); 3491 used = __ceph_caps_used(ci); 3492 dirty = __ceph_caps_dirty(ci); 3493 dout(" my wanted = %s, used = %s, dirty %s\n", 3494 ceph_cap_string(wanted), 3495 ceph_cap_string(used), 3496 ceph_cap_string(dirty)); 3497 3498 if ((was_stale || le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) && 3499 (wanted & ~(cap->mds_wanted | newcaps))) { 3500 /* 3501 * If mds is importing cap, prior cap messages that update 3502 * 'wanted' may get dropped by mds (migrate seq mismatch). 
3503 * 3504 * We don't send cap message to update 'wanted' if what we 3505 * want are already issued. If mds revokes caps, cap message 3506 * that releases caps also tells mds what we want. But if 3507 * caps got revoked by mds forcedly (session stale). We may 3508 * haven't told mds what we want. 3509 */ 3510 check_caps = 1; 3511 } 3512 3513 /* revocation, grant, or no-op? */ 3514 if (cap->issued & ~newcaps) { 3515 int revoking = cap->issued & ~newcaps; 3516 3517 dout("revocation: %s -> %s (revoking %s)\n", 3518 ceph_cap_string(cap->issued), 3519 ceph_cap_string(newcaps), 3520 ceph_cap_string(revoking)); 3521 if (S_ISREG(inode->i_mode) && 3522 (revoking & used & CEPH_CAP_FILE_BUFFER)) 3523 writeback = true; /* initiate writeback; will delay ack */ 3524 else if (queue_invalidate && 3525 revoking == CEPH_CAP_FILE_CACHE && 3526 (newcaps & CEPH_CAP_FILE_LAZYIO) == 0) 3527 ; /* do nothing yet, invalidation will be queued */ 3528 else if (cap == ci->i_auth_cap) 3529 check_caps = 1; /* check auth cap only */ 3530 else 3531 check_caps = 2; /* check all caps */ 3532 /* If there is new caps, try to wake up the waiters */ 3533 if (~cap->issued & newcaps) 3534 wake = true; 3535 cap->issued = newcaps; 3536 cap->implemented |= newcaps; 3537 } else if (cap->issued == newcaps) { 3538 dout("caps unchanged: %s -> %s\n", 3539 ceph_cap_string(cap->issued), ceph_cap_string(newcaps)); 3540 } else { 3541 dout("grant: %s -> %s\n", ceph_cap_string(cap->issued), 3542 ceph_cap_string(newcaps)); 3543 /* non-auth MDS is revoking the newly grant caps ? */ 3544 if (cap == ci->i_auth_cap && 3545 __ceph_caps_revoking_other(ci, cap, newcaps)) 3546 check_caps = 2; 3547 3548 cap->issued = newcaps; 3549 cap->implemented |= newcaps; /* add bits only, to 3550 * avoid stepping on a 3551 * pending revocation */ 3552 wake = true; 3553 } 3554 BUG_ON(cap->issued & ~cap->implemented); 3555 3556 if (extra_info->inline_version > 0 && 3557 extra_info->inline_version >= ci->i_inline_version) { 3558 ci->i_inline_version = extra_info->inline_version; 3559 if (ci->i_inline_version != CEPH_INLINE_NONE && 3560 (newcaps & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO))) 3561 fill_inline = true; 3562 } 3563 3564 if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) { 3565 if (ci->i_auth_cap == cap) { 3566 if (newcaps & ~extra_info->issued) 3567 wake = true; 3568 3569 if (ci->i_requested_max_size > max_size || 3570 !(le32_to_cpu(grant->wanted) & CEPH_CAP_ANY_FILE_WR)) { 3571 /* re-request max_size if necessary */ 3572 ci->i_requested_max_size = 0; 3573 wake = true; 3574 } 3575 3576 ceph_kick_flushing_inode_caps(session, ci); 3577 } 3578 up_read(&session->s_mdsc->snap_rwsem); 3579 } 3580 spin_unlock(&ci->i_ceph_lock); 3581 3582 if (fill_inline) 3583 ceph_fill_inline_data(inode, NULL, extra_info->inline_data, 3584 extra_info->inline_len); 3585 3586 if (queue_trunc) 3587 ceph_queue_vmtruncate(inode); 3588 3589 if (writeback) 3590 /* 3591 * queue inode for writeback: we can't actually call 3592 * filemap_write_and_wait, etc. from message handler 3593 * context. 
3594 */ 3595 ceph_queue_writeback(inode); 3596 if (queue_invalidate) 3597 ceph_queue_invalidate(inode); 3598 if (deleted_inode) 3599 invalidate_aliases(inode); 3600 if (wake) 3601 wake_up_all(&ci->i_cap_wq); 3602 3603 mutex_unlock(&session->s_mutex); 3604 if (check_caps == 1) 3605 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY | CHECK_CAPS_NOINVAL); 3606 else if (check_caps == 2) 3607 ceph_check_caps(ci, CHECK_CAPS_NOINVAL); 3608 } 3609 3610 /* 3611 * Handle FLUSH_ACK from MDS, indicating that metadata we sent to the 3612 * MDS has been safely committed. 3613 */ 3614 static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid, 3615 struct ceph_mds_caps *m, 3616 struct ceph_mds_session *session, 3617 struct ceph_cap *cap) 3618 __releases(ci->i_ceph_lock) 3619 { 3620 struct ceph_inode_info *ci = ceph_inode(inode); 3621 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; 3622 struct ceph_cap_flush *cf, *tmp_cf; 3623 LIST_HEAD(to_remove); 3624 unsigned seq = le32_to_cpu(m->seq); 3625 int dirty = le32_to_cpu(m->dirty); 3626 int cleaned = 0; 3627 bool drop = false; 3628 bool wake_ci = false; 3629 bool wake_mdsc = false; 3630 3631 list_for_each_entry_safe(cf, tmp_cf, &ci->i_cap_flush_list, i_list) { 3632 /* Is this the one that was flushed? */ 3633 if (cf->tid == flush_tid) 3634 cleaned = cf->caps; 3635 3636 /* Is this a capsnap? */ 3637 if (cf->is_capsnap) 3638 continue; 3639 3640 if (cf->tid <= flush_tid) { 3641 /* 3642 * An earlier or current tid. The FLUSH_ACK should 3643 * represent a superset of this flush's caps. 3644 */ 3645 wake_ci |= __detach_cap_flush_from_ci(ci, cf); 3646 list_add_tail(&cf->i_list, &to_remove); 3647 } else { 3648 /* 3649 * This is a later one. Any caps in it are still dirty 3650 * so don't count them as cleaned. 
3651 */ 3652 cleaned &= ~cf->caps; 3653 if (!cleaned) 3654 break; 3655 } 3656 } 3657 3658 dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s," 3659 " flushing %s -> %s\n", 3660 inode, session->s_mds, seq, ceph_cap_string(dirty), 3661 ceph_cap_string(cleaned), ceph_cap_string(ci->i_flushing_caps), 3662 ceph_cap_string(ci->i_flushing_caps & ~cleaned)); 3663 3664 if (list_empty(&to_remove) && !cleaned) 3665 goto out; 3666 3667 ci->i_flushing_caps &= ~cleaned; 3668 3669 spin_lock(&mdsc->cap_dirty_lock); 3670 3671 list_for_each_entry(cf, &to_remove, i_list) 3672 wake_mdsc |= __detach_cap_flush_from_mdsc(mdsc, cf); 3673 3674 if (ci->i_flushing_caps == 0) { 3675 if (list_empty(&ci->i_cap_flush_list)) { 3676 list_del_init(&ci->i_flushing_item); 3677 if (!list_empty(&session->s_cap_flushing)) { 3678 dout(" mds%d still flushing cap on %p\n", 3679 session->s_mds, 3680 &list_first_entry(&session->s_cap_flushing, 3681 struct ceph_inode_info, 3682 i_flushing_item)->netfs.inode); 3683 } 3684 } 3685 mdsc->num_cap_flushing--; 3686 dout(" inode %p now !flushing\n", inode); 3687 3688 if (ci->i_dirty_caps == 0) { 3689 dout(" inode %p now clean\n", inode); 3690 BUG_ON(!list_empty(&ci->i_dirty_item)); 3691 drop = true; 3692 if (ci->i_wr_ref == 0 && 3693 ci->i_wrbuffer_ref_head == 0) { 3694 BUG_ON(!ci->i_head_snapc); 3695 ceph_put_snap_context(ci->i_head_snapc); 3696 ci->i_head_snapc = NULL; 3697 } 3698 } else { 3699 BUG_ON(list_empty(&ci->i_dirty_item)); 3700 } 3701 } 3702 spin_unlock(&mdsc->cap_dirty_lock); 3703 3704 out: 3705 spin_unlock(&ci->i_ceph_lock); 3706 3707 while (!list_empty(&to_remove)) { 3708 cf = list_first_entry(&to_remove, 3709 struct ceph_cap_flush, i_list); 3710 list_del_init(&cf->i_list); 3711 if (!cf->is_capsnap) 3712 ceph_free_cap_flush(cf); 3713 } 3714 3715 if (wake_ci) 3716 wake_up_all(&ci->i_cap_wq); 3717 if (wake_mdsc) 3718 wake_up_all(&mdsc->cap_flushing_wq); 3719 if (drop) 3720 iput(inode); 3721 } 3722 3723 void __ceph_remove_capsnap(struct inode *inode, struct ceph_cap_snap *capsnap, 3724 bool *wake_ci, bool *wake_mdsc) 3725 { 3726 struct ceph_inode_info *ci = ceph_inode(inode); 3727 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; 3728 bool ret; 3729 3730 lockdep_assert_held(&ci->i_ceph_lock); 3731 3732 dout("removing capsnap %p, inode %p ci %p\n", capsnap, inode, ci); 3733 3734 list_del_init(&capsnap->ci_item); 3735 ret = __detach_cap_flush_from_ci(ci, &capsnap->cap_flush); 3736 if (wake_ci) 3737 *wake_ci = ret; 3738 3739 spin_lock(&mdsc->cap_dirty_lock); 3740 if (list_empty(&ci->i_cap_flush_list)) 3741 list_del_init(&ci->i_flushing_item); 3742 3743 ret = __detach_cap_flush_from_mdsc(mdsc, &capsnap->cap_flush); 3744 if (wake_mdsc) 3745 *wake_mdsc = ret; 3746 spin_unlock(&mdsc->cap_dirty_lock); 3747 } 3748 3749 void ceph_remove_capsnap(struct inode *inode, struct ceph_cap_snap *capsnap, 3750 bool *wake_ci, bool *wake_mdsc) 3751 { 3752 struct ceph_inode_info *ci = ceph_inode(inode); 3753 3754 lockdep_assert_held(&ci->i_ceph_lock); 3755 3756 WARN_ON_ONCE(capsnap->dirty_pages || capsnap->writing); 3757 __ceph_remove_capsnap(inode, capsnap, wake_ci, wake_mdsc); 3758 } 3759 3760 /* 3761 * Handle FLUSHSNAP_ACK. MDS has flushed snap data to disk and we can 3762 * throw away our cap_snap. 3763 * 3764 * Caller hold s_mutex. 
3765 */ 3766 static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid, 3767 struct ceph_mds_caps *m, 3768 struct ceph_mds_session *session) 3769 { 3770 struct ceph_inode_info *ci = ceph_inode(inode); 3771 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; 3772 u64 follows = le64_to_cpu(m->snap_follows); 3773 struct ceph_cap_snap *capsnap = NULL, *iter; 3774 bool wake_ci = false; 3775 bool wake_mdsc = false; 3776 3777 dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n", 3778 inode, ci, session->s_mds, follows); 3779 3780 spin_lock(&ci->i_ceph_lock); 3781 list_for_each_entry(iter, &ci->i_cap_snaps, ci_item) { 3782 if (iter->follows == follows) { 3783 if (iter->cap_flush.tid != flush_tid) { 3784 dout(" cap_snap %p follows %lld tid %lld !=" 3785 " %lld\n", iter, follows, 3786 flush_tid, iter->cap_flush.tid); 3787 break; 3788 } 3789 capsnap = iter; 3790 break; 3791 } else { 3792 dout(" skipping cap_snap %p follows %lld\n", 3793 iter, iter->follows); 3794 } 3795 } 3796 if (capsnap) 3797 ceph_remove_capsnap(inode, capsnap, &wake_ci, &wake_mdsc); 3798 spin_unlock(&ci->i_ceph_lock); 3799 3800 if (capsnap) { 3801 ceph_put_snap_context(capsnap->context); 3802 ceph_put_cap_snap(capsnap); 3803 if (wake_ci) 3804 wake_up_all(&ci->i_cap_wq); 3805 if (wake_mdsc) 3806 wake_up_all(&mdsc->cap_flushing_wq); 3807 iput(inode); 3808 } 3809 } 3810 3811 /* 3812 * Handle TRUNC from MDS, indicating file truncation. 3813 * 3814 * caller hold s_mutex. 3815 */ 3816 static bool handle_cap_trunc(struct inode *inode, 3817 struct ceph_mds_caps *trunc, 3818 struct ceph_mds_session *session) 3819 { 3820 struct ceph_inode_info *ci = ceph_inode(inode); 3821 int mds = session->s_mds; 3822 int seq = le32_to_cpu(trunc->seq); 3823 u32 truncate_seq = le32_to_cpu(trunc->truncate_seq); 3824 u64 truncate_size = le64_to_cpu(trunc->truncate_size); 3825 u64 size = le64_to_cpu(trunc->size); 3826 int implemented = 0; 3827 int dirty = __ceph_caps_dirty(ci); 3828 int issued = __ceph_caps_issued(ceph_inode(inode), &implemented); 3829 bool queue_trunc = false; 3830 3831 lockdep_assert_held(&ci->i_ceph_lock); 3832 3833 issued |= implemented | dirty; 3834 3835 dout("handle_cap_trunc inode %p mds%d seq %d to %lld seq %d\n", 3836 inode, mds, seq, truncate_size, truncate_seq); 3837 queue_trunc = ceph_fill_file_size(inode, issued, 3838 truncate_seq, truncate_size, size); 3839 return queue_trunc; 3840 } 3841 3842 /* 3843 * Handle EXPORT from MDS. Cap is being migrated _from_ this mds to a 3844 * different one. If we are the most recent migration we've seen (as 3845 * indicated by mseq), make note of the migrating cap bits for the 3846 * duration (until we see the corresponding IMPORT). 
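 * If the exported cap is still present but the target session is not
 * open yet, all locks are dropped, the target session is opened, and
 * the whole lookup is retried.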
3847 * 3848 * caller holds s_mutex 3849 */ 3850 static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex, 3851 struct ceph_mds_cap_peer *ph, 3852 struct ceph_mds_session *session) 3853 { 3854 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; 3855 struct ceph_mds_session *tsession = NULL; 3856 struct ceph_cap *cap, *tcap, *new_cap = NULL; 3857 struct ceph_inode_info *ci = ceph_inode(inode); 3858 u64 t_cap_id; 3859 unsigned mseq = le32_to_cpu(ex->migrate_seq); 3860 unsigned t_seq, t_mseq; 3861 int target, issued; 3862 int mds = session->s_mds; 3863 3864 if (ph) { 3865 t_cap_id = le64_to_cpu(ph->cap_id); 3866 t_seq = le32_to_cpu(ph->seq); 3867 t_mseq = le32_to_cpu(ph->mseq); 3868 target = le32_to_cpu(ph->mds); 3869 } else { 3870 t_cap_id = t_seq = t_mseq = 0; 3871 target = -1; 3872 } 3873 3874 dout("handle_cap_export inode %p ci %p mds%d mseq %d target %d\n", 3875 inode, ci, mds, mseq, target); 3876 retry: 3877 down_read(&mdsc->snap_rwsem); 3878 spin_lock(&ci->i_ceph_lock); 3879 cap = __get_cap_for_mds(ci, mds); 3880 if (!cap || cap->cap_id != le64_to_cpu(ex->cap_id)) 3881 goto out_unlock; 3882 3883 if (target < 0) { 3884 ceph_remove_cap(cap, false); 3885 goto out_unlock; 3886 } 3887 3888 /* 3889 * now we know we haven't received the cap import message yet 3890 * because the exported cap still exists. 3891 */ 3892 3893 issued = cap->issued; 3894 if (issued != cap->implemented) 3895 pr_err_ratelimited("handle_cap_export: issued != implemented: " 3896 "ino (%llx.%llx) mds%d seq %d mseq %d " 3897 "issued %s implemented %s\n", 3898 ceph_vinop(inode), mds, cap->seq, cap->mseq, 3899 ceph_cap_string(issued), 3900 ceph_cap_string(cap->implemented)); 3901 3902 3903 tcap = __get_cap_for_mds(ci, target); 3904 if (tcap) { 3905 /* already have caps from the target */ 3906 if (tcap->cap_id == t_cap_id && 3907 ceph_seq_cmp(tcap->seq, t_seq) < 0) { 3908 dout(" updating import cap %p mds%d\n", tcap, target); 3909 tcap->cap_id = t_cap_id; 3910 tcap->seq = t_seq - 1; 3911 tcap->issue_seq = t_seq - 1; 3912 tcap->issued |= issued; 3913 tcap->implemented |= issued; 3914 if (cap == ci->i_auth_cap) { 3915 ci->i_auth_cap = tcap; 3916 change_auth_cap_ses(ci, tcap->session); 3917 } 3918 } 3919 ceph_remove_cap(cap, false); 3920 goto out_unlock; 3921 } else if (tsession) { 3922 /* add a placeholder for the export target */ 3923 int flag = (cap == ci->i_auth_cap) ?
CEPH_CAP_FLAG_AUTH : 0; 3924 tcap = new_cap; 3925 ceph_add_cap(inode, tsession, t_cap_id, issued, 0, 3926 t_seq - 1, t_mseq, (u64)-1, flag, &new_cap); 3927 3928 if (!list_empty(&ci->i_cap_flush_list) && 3929 ci->i_auth_cap == tcap) { 3930 spin_lock(&mdsc->cap_dirty_lock); 3931 list_move_tail(&ci->i_flushing_item, 3932 &tcap->session->s_cap_flushing); 3933 spin_unlock(&mdsc->cap_dirty_lock); 3934 } 3935 3936 ceph_remove_cap(cap, false); 3937 goto out_unlock; 3938 } 3939 3940 spin_unlock(&ci->i_ceph_lock); 3941 up_read(&mdsc->snap_rwsem); 3942 mutex_unlock(&session->s_mutex); 3943 3944 /* open target session */ 3945 tsession = ceph_mdsc_open_export_target_session(mdsc, target); 3946 if (!IS_ERR(tsession)) { 3947 if (mds > target) { 3948 mutex_lock(&session->s_mutex); 3949 mutex_lock_nested(&tsession->s_mutex, 3950 SINGLE_DEPTH_NESTING); 3951 } else { 3952 mutex_lock(&tsession->s_mutex); 3953 mutex_lock_nested(&session->s_mutex, 3954 SINGLE_DEPTH_NESTING); 3955 } 3956 new_cap = ceph_get_cap(mdsc, NULL); 3957 } else { 3958 WARN_ON(1); 3959 tsession = NULL; 3960 target = -1; 3961 mutex_lock(&session->s_mutex); 3962 } 3963 goto retry; 3964 3965 out_unlock: 3966 spin_unlock(&ci->i_ceph_lock); 3967 up_read(&mdsc->snap_rwsem); 3968 mutex_unlock(&session->s_mutex); 3969 if (tsession) { 3970 mutex_unlock(&tsession->s_mutex); 3971 ceph_put_mds_session(tsession); 3972 } 3973 if (new_cap) 3974 ceph_put_cap(mdsc, new_cap); 3975 } 3976 3977 /* 3978 * Handle cap IMPORT. 3979 * 3980 * caller holds s_mutex. acquires i_ceph_lock 3981 */ 3982 static void handle_cap_import(struct ceph_mds_client *mdsc, 3983 struct inode *inode, struct ceph_mds_caps *im, 3984 struct ceph_mds_cap_peer *ph, 3985 struct ceph_mds_session *session, 3986 struct ceph_cap **target_cap, int *old_issued) 3987 { 3988 struct ceph_inode_info *ci = ceph_inode(inode); 3989 struct ceph_cap *cap, *ocap, *new_cap = NULL; 3990 int mds = session->s_mds; 3991 int issued; 3992 unsigned caps = le32_to_cpu(im->caps); 3993 unsigned wanted = le32_to_cpu(im->wanted); 3994 unsigned seq = le32_to_cpu(im->seq); 3995 unsigned mseq = le32_to_cpu(im->migrate_seq); 3996 u64 realmino = le64_to_cpu(im->realm); 3997 u64 cap_id = le64_to_cpu(im->cap_id); 3998 u64 p_cap_id; 3999 int peer; 4000 4001 if (ph) { 4002 p_cap_id = le64_to_cpu(ph->cap_id); 4003 peer = le32_to_cpu(ph->mds); 4004 } else { 4005 p_cap_id = 0; 4006 peer = -1; 4007 } 4008 4009 dout("handle_cap_import inode %p ci %p mds%d mseq %d peer %d\n", 4010 inode, ci, mds, mseq, peer); 4011 retry: 4012 cap = __get_cap_for_mds(ci, mds); 4013 if (!cap) { 4014 if (!new_cap) { 4015 spin_unlock(&ci->i_ceph_lock); 4016 new_cap = ceph_get_cap(mdsc, NULL); 4017 spin_lock(&ci->i_ceph_lock); 4018 goto retry; 4019 } 4020 cap = new_cap; 4021 } else { 4022 if (new_cap) { 4023 ceph_put_cap(mdsc, new_cap); 4024 new_cap = NULL; 4025 } 4026 } 4027 4028 __ceph_caps_issued(ci, &issued); 4029 issued |= __ceph_caps_dirty(ci); 4030 4031 ceph_add_cap(inode, session, cap_id, caps, wanted, seq, mseq, 4032 realmino, CEPH_CAP_FLAG_AUTH, &new_cap); 4033 4034 ocap = peer >= 0 ? 
__get_cap_for_mds(ci, peer) : NULL; 4035 if (ocap && ocap->cap_id == p_cap_id) { 4036 dout(" remove export cap %p mds%d flags %d\n", 4037 ocap, peer, ph->flags); 4038 if ((ph->flags & CEPH_CAP_FLAG_AUTH) && 4039 (ocap->seq != le32_to_cpu(ph->seq) || 4040 ocap->mseq != le32_to_cpu(ph->mseq))) { 4041 pr_err_ratelimited("handle_cap_import: " 4042 "mismatched seq/mseq: ino (%llx.%llx) " 4043 "mds%d seq %d mseq %d importer mds%d " 4044 "has peer seq %d mseq %d\n", 4045 ceph_vinop(inode), peer, ocap->seq, 4046 ocap->mseq, mds, le32_to_cpu(ph->seq), 4047 le32_to_cpu(ph->mseq)); 4048 } 4049 ceph_remove_cap(ocap, (ph->flags & CEPH_CAP_FLAG_RELEASE)); 4050 } 4051 4052 *old_issued = issued; 4053 *target_cap = cap; 4054 } 4055 4056 /* 4057 * Handle a caps message from the MDS. 4058 * 4059 * Identify the appropriate session, inode, and call the right handler 4060 * based on the cap op. 4061 */ 4062 void ceph_handle_caps(struct ceph_mds_session *session, 4063 struct ceph_msg *msg) 4064 { 4065 struct ceph_mds_client *mdsc = session->s_mdsc; 4066 struct inode *inode; 4067 struct ceph_inode_info *ci; 4068 struct ceph_cap *cap; 4069 struct ceph_mds_caps *h; 4070 struct ceph_mds_cap_peer *peer = NULL; 4071 struct ceph_snap_realm *realm = NULL; 4072 int op; 4073 int msg_version = le16_to_cpu(msg->hdr.version); 4074 u32 seq, mseq; 4075 struct ceph_vino vino; 4076 void *snaptrace; 4077 size_t snaptrace_len; 4078 void *p, *end; 4079 struct cap_extra_info extra_info = {}; 4080 bool queue_trunc; 4081 4082 dout("handle_caps from mds%d\n", session->s_mds); 4083 4084 /* decode */ 4085 end = msg->front.iov_base + msg->front.iov_len; 4086 if (msg->front.iov_len < sizeof(*h)) 4087 goto bad; 4088 h = msg->front.iov_base; 4089 op = le32_to_cpu(h->op); 4090 vino.ino = le64_to_cpu(h->ino); 4091 vino.snap = CEPH_NOSNAP; 4092 seq = le32_to_cpu(h->seq); 4093 mseq = le32_to_cpu(h->migrate_seq); 4094 4095 snaptrace = h + 1; 4096 snaptrace_len = le32_to_cpu(h->snap_trace_len); 4097 p = snaptrace + snaptrace_len; 4098 4099 if (msg_version >= 2) { 4100 u32 flock_len; 4101 ceph_decode_32_safe(&p, end, flock_len, bad); 4102 if (p + flock_len > end) 4103 goto bad; 4104 p += flock_len; 4105 } 4106 4107 if (msg_version >= 3) { 4108 if (op == CEPH_CAP_OP_IMPORT) { 4109 if (p + sizeof(*peer) > end) 4110 goto bad; 4111 peer = p; 4112 p += sizeof(*peer); 4113 } else if (op == CEPH_CAP_OP_EXPORT) { 4114 /* recorded in unused fields */ 4115 peer = (void *)&h->size; 4116 } 4117 } 4118 4119 if (msg_version >= 4) { 4120 ceph_decode_64_safe(&p, end, extra_info.inline_version, bad); 4121 ceph_decode_32_safe(&p, end, extra_info.inline_len, bad); 4122 if (p + extra_info.inline_len > end) 4123 goto bad; 4124 extra_info.inline_data = p; 4125 p += extra_info.inline_len; 4126 } 4127 4128 if (msg_version >= 5) { 4129 struct ceph_osd_client *osdc = &mdsc->fsc->client->osdc; 4130 u32 epoch_barrier; 4131 4132 ceph_decode_32_safe(&p, end, epoch_barrier, bad); 4133 ceph_osdc_update_epoch_barrier(osdc, epoch_barrier); 4134 } 4135 4136 if (msg_version >= 8) { 4137 u32 pool_ns_len; 4138 4139 /* version >= 6 */ 4140 ceph_decode_skip_64(&p, end, bad); // flush_tid 4141 /* version >= 7 */ 4142 ceph_decode_skip_32(&p, end, bad); // caller_uid 4143 ceph_decode_skip_32(&p, end, bad); // caller_gid 4144 /* version >= 8 */ 4145 ceph_decode_32_safe(&p, end, pool_ns_len, bad); 4146 if (pool_ns_len > 0) { 4147 ceph_decode_need(&p, end, pool_ns_len, bad); 4148 extra_info.pool_ns = 4149 ceph_find_or_create_string(p, pool_ns_len); 4150 p += pool_ns_len; 4151 } 4152 } 4153 
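	/*
	 * Later additions to the message are decoded strictly in version
	 * order; fields this client does not use (flush_tid, caller_uid/gid,
	 * flags) are skipped rather than parsed.
	 */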
4154 if (msg_version >= 9) { 4155 struct ceph_timespec *btime; 4156 4157 if (p + sizeof(*btime) > end) 4158 goto bad; 4159 btime = p; 4160 ceph_decode_timespec64(&extra_info.btime, btime); 4161 p += sizeof(*btime); 4162 ceph_decode_64_safe(&p, end, extra_info.change_attr, bad); 4163 } 4164 4165 if (msg_version >= 11) { 4166 /* version >= 10 */ 4167 ceph_decode_skip_32(&p, end, bad); // flags 4168 /* version >= 11 */ 4169 extra_info.dirstat_valid = true; 4170 ceph_decode_64_safe(&p, end, extra_info.nfiles, bad); 4171 ceph_decode_64_safe(&p, end, extra_info.nsubdirs, bad); 4172 } 4173 4174 /* lookup ino */ 4175 inode = ceph_find_inode(mdsc->fsc->sb, vino); 4176 dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino, 4177 vino.snap, inode); 4178 4179 mutex_lock(&session->s_mutex); 4180 inc_session_sequence(session); 4181 dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq, 4182 (unsigned)seq); 4183 4184 if (!inode) { 4185 dout(" i don't have ino %llx\n", vino.ino); 4186 4187 if (op == CEPH_CAP_OP_IMPORT) { 4188 cap = ceph_get_cap(mdsc, NULL); 4189 cap->cap_ino = vino.ino; 4190 cap->queue_release = 1; 4191 cap->cap_id = le64_to_cpu(h->cap_id); 4192 cap->mseq = mseq; 4193 cap->seq = seq; 4194 cap->issue_seq = seq; 4195 spin_lock(&session->s_cap_lock); 4196 __ceph_queue_cap_release(session, cap); 4197 spin_unlock(&session->s_cap_lock); 4198 } 4199 goto flush_cap_releases; 4200 } 4201 ci = ceph_inode(inode); 4202 4203 /* these will work even if we don't have a cap yet */ 4204 switch (op) { 4205 case CEPH_CAP_OP_FLUSHSNAP_ACK: 4206 handle_cap_flushsnap_ack(inode, le64_to_cpu(msg->hdr.tid), 4207 h, session); 4208 goto done; 4209 4210 case CEPH_CAP_OP_EXPORT: 4211 handle_cap_export(inode, h, peer, session); 4212 goto done_unlocked; 4213 4214 case CEPH_CAP_OP_IMPORT: 4215 realm = NULL; 4216 if (snaptrace_len) { 4217 down_write(&mdsc->snap_rwsem); 4218 ceph_update_snap_trace(mdsc, snaptrace, 4219 snaptrace + snaptrace_len, 4220 false, &realm); 4221 downgrade_write(&mdsc->snap_rwsem); 4222 } else { 4223 down_read(&mdsc->snap_rwsem); 4224 } 4225 spin_lock(&ci->i_ceph_lock); 4226 handle_cap_import(mdsc, inode, h, peer, session, 4227 &cap, &extra_info.issued); 4228 handle_cap_grant(inode, session, cap, 4229 h, msg->middle, &extra_info); 4230 if (realm) 4231 ceph_put_snap_realm(mdsc, realm); 4232 goto done_unlocked; 4233 } 4234 4235 /* the rest require a cap */ 4236 spin_lock(&ci->i_ceph_lock); 4237 cap = __get_cap_for_mds(ceph_inode(inode), session->s_mds); 4238 if (!cap) { 4239 dout(" no cap on %p ino %llx.%llx from mds%d\n", 4240 inode, ceph_ino(inode), ceph_snap(inode), 4241 session->s_mds); 4242 spin_unlock(&ci->i_ceph_lock); 4243 goto flush_cap_releases; 4244 } 4245 4246 /* note that each of these drops i_ceph_lock for us */ 4247 switch (op) { 4248 case CEPH_CAP_OP_REVOKE: 4249 case CEPH_CAP_OP_GRANT: 4250 __ceph_caps_issued(ci, &extra_info.issued); 4251 extra_info.issued |= __ceph_caps_dirty(ci); 4252 handle_cap_grant(inode, session, cap, 4253 h, msg->middle, &extra_info); 4254 goto done_unlocked; 4255 4256 case CEPH_CAP_OP_FLUSH_ACK: 4257 handle_cap_flush_ack(inode, le64_to_cpu(msg->hdr.tid), 4258 h, session, cap); 4259 break; 4260 4261 case CEPH_CAP_OP_TRUNC: 4262 queue_trunc = handle_cap_trunc(inode, h, session); 4263 spin_unlock(&ci->i_ceph_lock); 4264 if (queue_trunc) 4265 ceph_queue_vmtruncate(inode); 4266 break; 4267 4268 default: 4269 spin_unlock(&ci->i_ceph_lock); 4270 pr_err("ceph_handle_caps: unknown cap op %d %s\n", op, 4271 ceph_cap_op_name(op)); 4272 } 
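	/*
	 * FLUSH_ACK, TRUNC and the default case fall through to 'done' below
	 * with i_ceph_lock already dropped; only s_mutex, the inode reference
	 * and the pool namespace string remain to be released.
	 */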
4273 4274 done: 4275 mutex_unlock(&session->s_mutex); 4276 done_unlocked: 4277 iput(inode); 4278 out: 4279 ceph_put_string(extra_info.pool_ns); 4280 return; 4281 4282 flush_cap_releases: 4283 /* 4284 * send any cap release message to try to move things 4285 * along for the mds (who clearly thinks we still have this 4286 * cap). 4287 */ 4288 ceph_flush_cap_releases(mdsc, session); 4289 goto done; 4290 4291 bad: 4292 pr_err("ceph_handle_caps: corrupt message\n"); 4293 ceph_msg_dump(msg); 4294 goto out; 4295 } 4296 4297 /* 4298 * Delayed work handler to process the end of the delayed cap release LRU list. 4299 * 4300 * If new caps are added to the list while processing it, these won't get 4301 * processed in this run. In this case, ci->i_hold_caps_max will be 4302 * returned so that the work can be scheduled accordingly. 4303 */ 4304 unsigned long ceph_check_delayed_caps(struct ceph_mds_client *mdsc) 4305 { 4306 struct inode *inode; 4307 struct ceph_inode_info *ci; 4308 struct ceph_mount_options *opt = mdsc->fsc->mount_options; 4309 unsigned long delay_max = opt->caps_wanted_delay_max * HZ; 4310 unsigned long loop_start = jiffies; 4311 unsigned long delay = 0; 4312 4313 dout("check_delayed_caps\n"); 4314 spin_lock(&mdsc->cap_delay_lock); 4315 while (!list_empty(&mdsc->cap_delay_list)) { 4316 ci = list_first_entry(&mdsc->cap_delay_list, 4317 struct ceph_inode_info, 4318 i_cap_delay_list); 4319 if (time_before(loop_start, ci->i_hold_caps_max - delay_max)) { 4320 dout("%s caps added recently. Exiting loop\n", __func__); 4321 delay = ci->i_hold_caps_max; 4322 break; 4323 } 4324 if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 && 4325 time_before(jiffies, ci->i_hold_caps_max)) 4326 break; 4327 list_del_init(&ci->i_cap_delay_list); 4328 4329 inode = igrab(&ci->netfs.inode); 4330 if (inode) { 4331 spin_unlock(&mdsc->cap_delay_lock); 4332 dout("check_delayed_caps on %p\n", inode); 4333 ceph_check_caps(ci, 0); 4334 iput(inode); 4335 spin_lock(&mdsc->cap_delay_lock); 4336 } 4337 } 4338 spin_unlock(&mdsc->cap_delay_lock); 4339 4340 return delay; 4341 } 4342 4343 /* 4344 * Flush all dirty caps to the mds 4345 */ 4346 static void flush_dirty_session_caps(struct ceph_mds_session *s) 4347 { 4348 struct ceph_mds_client *mdsc = s->s_mdsc; 4349 struct ceph_inode_info *ci; 4350 struct inode *inode; 4351 4352 dout("flush_dirty_caps\n"); 4353 spin_lock(&mdsc->cap_dirty_lock); 4354 while (!list_empty(&s->s_cap_dirty)) { 4355 ci = list_first_entry(&s->s_cap_dirty, struct ceph_inode_info, 4356 i_dirty_item); 4357 inode = &ci->netfs.inode; 4358 ihold(inode); 4359 dout("flush_dirty_caps %llx.%llx\n", ceph_vinop(inode)); 4360 spin_unlock(&mdsc->cap_dirty_lock); 4361 ceph_wait_on_async_create(inode); 4362 ceph_check_caps(ci, CHECK_CAPS_FLUSH); 4363 iput(inode); 4364 spin_lock(&mdsc->cap_dirty_lock); 4365 } 4366 spin_unlock(&mdsc->cap_dirty_lock); 4367 dout("flush_dirty_caps done\n"); 4368 } 4369 4370 void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc) 4371 { 4372 ceph_mdsc_iterate_sessions(mdsc, flush_dirty_session_caps, true); 4373 } 4374 4375 void __ceph_touch_fmode(struct ceph_inode_info *ci, 4376 struct ceph_mds_client *mdsc, int fmode) 4377 { 4378 unsigned long now = jiffies; 4379 if (fmode & CEPH_FILE_MODE_RD) 4380 ci->i_last_rd = now; 4381 if (fmode & CEPH_FILE_MODE_WR) 4382 ci->i_last_wr = now; 4383 /* queue periodic check */ 4384 if (fmode && 4385 __ceph_is_any_real_caps(ci) && 4386 list_empty(&ci->i_cap_delay_list)) 4387 __cap_delay_requeue(mdsc, ci); 4388 } 4389 4390 void ceph_get_fmode(struct ceph_inode_info *ci,
int fmode, int count) 4391 { 4392 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(ci->netfs.inode.i_sb); 4393 int bits = (fmode << 1) | 1; 4394 bool already_opened = false; 4395 int i; 4396 4397 if (count == 1) 4398 atomic64_inc(&mdsc->metric.opened_files); 4399 4400 spin_lock(&ci->i_ceph_lock); 4401 for (i = 0; i < CEPH_FILE_MODE_BITS; i++) { 4402 /* 4403 * If any of the mode refs is larger than 0, 4404 * that means the file has already been opened 4405 * by others. Just skip checking the PIN ref. 4406 */ 4407 if (i && ci->i_nr_by_mode[i]) 4408 already_opened = true; 4409 4410 if (bits & (1 << i)) 4411 ci->i_nr_by_mode[i] += count; 4412 } 4413 4414 if (!already_opened) 4415 percpu_counter_inc(&mdsc->metric.opened_inodes); 4416 spin_unlock(&ci->i_ceph_lock); 4417 } 4418 4419 /* 4420 * Drop an open file reference. If this was the last open reference, 4421 * we may need to release capabilities to the MDS (or schedule 4422 * their delayed release). 4423 */ 4424 void ceph_put_fmode(struct ceph_inode_info *ci, int fmode, int count) 4425 { 4426 struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(ci->netfs.inode.i_sb); 4427 int bits = (fmode << 1) | 1; 4428 bool is_closed = true; 4429 int i; 4430 4431 if (count == 1) 4432 atomic64_dec(&mdsc->metric.opened_files); 4433 4434 spin_lock(&ci->i_ceph_lock); 4435 for (i = 0; i < CEPH_FILE_MODE_BITS; i++) { 4436 if (bits & (1 << i)) { 4437 BUG_ON(ci->i_nr_by_mode[i] < count); 4438 ci->i_nr_by_mode[i] -= count; 4439 } 4440 4441 /* 4442 * If any of the mode refs is still non-zero 4443 * after the decrement, the file is still opened 4444 * by others. Just skip checking the PIN ref. 4445 */ 4446 if (i && ci->i_nr_by_mode[i]) 4447 is_closed = false; 4448 } 4449 4450 if (is_closed) 4451 percpu_counter_dec(&mdsc->metric.opened_inodes); 4452 spin_unlock(&ci->i_ceph_lock); 4453 } 4454 4455 /* 4456 * For a soon-to-be unlinked file, drop the LINK caps. If it 4457 * looks like the link count will hit 0, drop any other caps (other 4458 * than PIN) we don't specifically want (due to the file still being 4459 * open). 4460 */ 4461 int ceph_drop_caps_for_unlink(struct inode *inode) 4462 { 4463 struct ceph_inode_info *ci = ceph_inode(inode); 4464 int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL; 4465 4466 spin_lock(&ci->i_ceph_lock); 4467 if (inode->i_nlink == 1) { 4468 drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN); 4469 4470 if (__ceph_caps_dirty(ci)) { 4471 struct ceph_mds_client *mdsc = 4472 ceph_inode_to_client(inode)->mdsc; 4473 __cap_delay_requeue_front(mdsc, ci); 4474 } 4475 } 4476 spin_unlock(&ci->i_ceph_lock); 4477 return drop; 4478 } 4479 4480 /* 4481 * Helpers for embedding cap and dentry lease releases into mds 4482 * requests. 4483 * 4484 * @force is used by dentry_release (below) to force inclusion of a 4485 * record for the directory inode, even when there aren't any caps to 4486 * drop.
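 *
 * As an illustration of the drop/unless logic in ceph_encode_inode_release() below: with drop = AUTH_SHARED and unless = AUTH_EXCL, AUTH_SHARED is only released when AUTH_EXCL is not currently issued, and in all cases only caps that are neither in use nor dirty are actually dropped.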
4487 */ 4488 int ceph_encode_inode_release(void **p, struct inode *inode, 4489 int mds, int drop, int unless, int force) 4490 { 4491 struct ceph_inode_info *ci = ceph_inode(inode); 4492 struct ceph_cap *cap; 4493 struct ceph_mds_request_release *rel = *p; 4494 int used, dirty; 4495 int ret = 0; 4496 4497 spin_lock(&ci->i_ceph_lock); 4498 used = __ceph_caps_used(ci); 4499 dirty = __ceph_caps_dirty(ci); 4500 4501 dout("encode_inode_release %p mds%d used|dirty %s drop %s unless %s\n", 4502 inode, mds, ceph_cap_string(used|dirty), ceph_cap_string(drop), 4503 ceph_cap_string(unless)); 4504 4505 /* only drop unused, clean caps */ 4506 drop &= ~(used | dirty); 4507 4508 cap = __get_cap_for_mds(ci, mds); 4509 if (cap && __cap_is_valid(cap)) { 4510 unless &= cap->issued; 4511 if (unless) { 4512 if (unless & CEPH_CAP_AUTH_EXCL) 4513 drop &= ~CEPH_CAP_AUTH_SHARED; 4514 if (unless & CEPH_CAP_LINK_EXCL) 4515 drop &= ~CEPH_CAP_LINK_SHARED; 4516 if (unless & CEPH_CAP_XATTR_EXCL) 4517 drop &= ~CEPH_CAP_XATTR_SHARED; 4518 if (unless & CEPH_CAP_FILE_EXCL) 4519 drop &= ~CEPH_CAP_FILE_SHARED; 4520 } 4521 4522 if (force || (cap->issued & drop)) { 4523 if (cap->issued & drop) { 4524 int wanted = __ceph_caps_wanted(ci); 4525 dout("encode_inode_release %p cap %p " 4526 "%s -> %s, wanted %s -> %s\n", inode, cap, 4527 ceph_cap_string(cap->issued), 4528 ceph_cap_string(cap->issued & ~drop), 4529 ceph_cap_string(cap->mds_wanted), 4530 ceph_cap_string(wanted)); 4531 4532 cap->issued &= ~drop; 4533 cap->implemented &= ~drop; 4534 cap->mds_wanted = wanted; 4535 if (cap == ci->i_auth_cap && 4536 !(wanted & CEPH_CAP_ANY_FILE_WR)) 4537 ci->i_requested_max_size = 0; 4538 } else { 4539 dout("encode_inode_release %p cap %p %s" 4540 " (force)\n", inode, cap, 4541 ceph_cap_string(cap->issued)); 4542 } 4543 4544 rel->ino = cpu_to_le64(ceph_ino(inode)); 4545 rel->cap_id = cpu_to_le64(cap->cap_id); 4546 rel->seq = cpu_to_le32(cap->seq); 4547 rel->issue_seq = cpu_to_le32(cap->issue_seq); 4548 rel->mseq = cpu_to_le32(cap->mseq); 4549 rel->caps = cpu_to_le32(cap->implemented); 4550 rel->wanted = cpu_to_le32(cap->mds_wanted); 4551 rel->dname_len = 0; 4552 rel->dname_seq = 0; 4553 *p += sizeof(*rel); 4554 ret = 1; 4555 } else { 4556 dout("encode_inode_release %p cap %p %s (noop)\n", 4557 inode, cap, ceph_cap_string(cap->issued)); 4558 } 4559 } 4560 spin_unlock(&ci->i_ceph_lock); 4561 return ret; 4562 } 4563 4564 int ceph_encode_dentry_release(void **p, struct dentry *dentry, 4565 struct inode *dir, 4566 int mds, int drop, int unless) 4567 { 4568 struct dentry *parent = NULL; 4569 struct ceph_mds_request_release *rel = *p; 4570 struct ceph_dentry_info *di = ceph_dentry(dentry); 4571 int force = 0; 4572 int ret; 4573 4574 /* 4575 * force a record for the directory caps if we have a dentry lease. 4576 * this is racy (can't take i_ceph_lock and d_lock together), but it 4577 * doesn't have to be perfect; the mds will revoke anything we don't 4578 * release.
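 *
 * Hence the two-pass locking below: d_lock is taken to sample the lease and (if necessary) the parent, dropped across ceph_encode_inode_release() (which takes i_ceph_lock), then re-taken to append the dentry name and lease seq to the release record.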
*/ 4580 spin_lock(&dentry->d_lock); 4581 if (di->lease_session && di->lease_session->s_mds == mds) 4582 force = 1; 4583 if (!dir) { 4584 parent = dget(dentry->d_parent); 4585 dir = d_inode(parent); 4586 } 4587 spin_unlock(&dentry->d_lock); 4588 4589 ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force); 4590 dput(parent); 4591 4592 spin_lock(&dentry->d_lock); 4593 if (ret && di->lease_session && di->lease_session->s_mds == mds) { 4594 dout("encode_dentry_release %p mds%d seq %d\n", 4595 dentry, mds, (int)di->lease_seq); 4596 rel->dname_len = cpu_to_le32(dentry->d_name.len); 4597 memcpy(*p, dentry->d_name.name, dentry->d_name.len); 4598 *p += dentry->d_name.len; 4599 rel->dname_seq = cpu_to_le32(di->lease_seq); 4600 __ceph_mdsc_drop_dentry_lease(dentry); 4601 } 4602 spin_unlock(&dentry->d_lock); 4603 return ret; 4604 } 4605 4606 static int remove_capsnaps(struct ceph_mds_client *mdsc, struct inode *inode) 4607 { 4608 struct ceph_inode_info *ci = ceph_inode(inode); 4609 struct ceph_cap_snap *capsnap; 4610 int capsnap_release = 0; 4611 4612 lockdep_assert_held(&ci->i_ceph_lock); 4613 4614 dout("removing capsnaps, ci is %p, inode is %p\n", ci, inode); 4615 4616 while (!list_empty(&ci->i_cap_snaps)) { 4617 capsnap = list_first_entry(&ci->i_cap_snaps, 4618 struct ceph_cap_snap, ci_item); 4619 __ceph_remove_capsnap(inode, capsnap, NULL, NULL); 4620 ceph_put_snap_context(capsnap->context); 4621 ceph_put_cap_snap(capsnap); 4622 capsnap_release++; 4623 } 4624 wake_up_all(&ci->i_cap_wq); 4625 wake_up_all(&mdsc->cap_flushing_wq); 4626 return capsnap_release; 4627 } 4628 4629 int ceph_purge_inode_cap(struct inode *inode, struct ceph_cap *cap, bool *invalidate) 4630 { 4631 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 4632 struct ceph_mds_client *mdsc = fsc->mdsc; 4633 struct ceph_inode_info *ci = ceph_inode(inode); 4634 bool is_auth; 4635 bool dirty_dropped = false; 4636 int iputs = 0; 4637 4638 lockdep_assert_held(&ci->i_ceph_lock); 4639 4640 dout("removing cap %p, ci is %p, inode is %p\n", 4641 cap, ci, &ci->netfs.inode); 4642 4643 is_auth = (cap == ci->i_auth_cap); 4644 __ceph_remove_cap(cap, false); 4645 if (is_auth) { 4646 struct ceph_cap_flush *cf; 4647 4648 if (ceph_inode_is_shutdown(inode)) { 4649 if (inode->i_data.nrpages > 0) 4650 *invalidate = true; 4651 if (ci->i_wrbuffer_ref > 0) 4652 mapping_set_error(&inode->i_data, -EIO); 4653 } 4654 4655 spin_lock(&mdsc->cap_dirty_lock); 4656 4657 /* trash all of the cap flushes for this inode */ 4658 while (!list_empty(&ci->i_cap_flush_list)) { 4659 cf = list_first_entry(&ci->i_cap_flush_list, 4660 struct ceph_cap_flush, i_list); 4661 list_del_init(&cf->g_list); 4662 list_del_init(&cf->i_list); 4663 if (!cf->is_capsnap) 4664 ceph_free_cap_flush(cf); 4665 } 4666 4667 if (!list_empty(&ci->i_dirty_item)) { 4668 pr_warn_ratelimited( 4669 " dropping dirty %s state for %p %lld\n", 4670 ceph_cap_string(ci->i_dirty_caps), 4671 inode, ceph_ino(inode)); 4672 ci->i_dirty_caps = 0; 4673 list_del_init(&ci->i_dirty_item); 4674 dirty_dropped = true; 4675 } 4676 if (!list_empty(&ci->i_flushing_item)) { 4677 pr_warn_ratelimited( 4678 " dropping dirty+flushing %s state for %p %lld\n", 4679 ceph_cap_string(ci->i_flushing_caps), 4680 inode, ceph_ino(inode)); 4681 ci->i_flushing_caps = 0; 4682 list_del_init(&ci->i_flushing_item); 4683 mdsc->num_cap_flushing--; 4684 dirty_dropped = true; 4685 } 4686 spin_unlock(&mdsc->cap_dirty_lock); 4687 4688 if (dirty_dropped) { 4689 mapping_set_error(inode->i_mapping, -EIO); 4690 4691 if
(ci->i_wrbuffer_ref_head == 0 && 4692 ci->i_wr_ref == 0 && 4693 ci->i_dirty_caps == 0 && 4694 ci->i_flushing_caps == 0) { 4695 ceph_put_snap_context(ci->i_head_snapc); 4696 ci->i_head_snapc = NULL; 4697 } 4698 } 4699 4700 if (atomic_read(&ci->i_filelock_ref) > 0) { 4701 /* make further file lock syscall return -EIO */ 4702 ci->i_ceph_flags |= CEPH_I_ERROR_FILELOCK; 4703 pr_warn_ratelimited(" dropping file locks for %p %lld\n", 4704 inode, ceph_ino(inode)); 4705 } 4706 4707 if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) { 4708 cf = ci->i_prealloc_cap_flush; 4709 ci->i_prealloc_cap_flush = NULL; 4710 if (!cf->is_capsnap) 4711 ceph_free_cap_flush(cf); 4712 } 4713 4714 if (!list_empty(&ci->i_cap_snaps)) 4715 iputs = remove_capsnaps(mdsc, inode); 4716 } 4717 if (dirty_dropped) 4718 ++iputs; 4719 return iputs; 4720 } 4721