// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/vmalloc.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/random.h>
#include <linux/sort.h>
#include <linux/iversion.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include <linux/ceph/decode.h>

/*
 * Ceph inode operations
 *
 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
 * setattr, etc.), xattr helpers, and helpers for assimilating
 * metadata returned by the MDS into our cache.
 *
 * Also define helpers for doing asynchronous writeback, invalidation,
 * and truncation for the benefit of those who can't afford to block
 * (typically because they are in the message handler path).
 */

static const struct inode_operations ceph_symlink_iops;

static void ceph_inode_work(struct work_struct *work);

/*
 * find or create an inode, given the ceph ino number
 */
static int ceph_set_ino_cb(struct inode *inode, void *data)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);

	ci->i_vino = *(struct ceph_vino *)data;
	inode->i_ino = ceph_vino_to_ino_t(ci->i_vino);
	inode_set_iversion_raw(inode, 0);
	percpu_counter_inc(&mdsc->metric.total_inodes);

	return 0;
}

struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
	struct inode *inode;

	if (ceph_vino_is_reserved(vino))
		return ERR_PTR(-EREMOTEIO);

	inode = iget5_locked(sb, (unsigned long)vino.ino, ceph_ino_compare,
			     ceph_set_ino_cb, &vino);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	dout("get_inode on %llu=%llx.%llx got %p new %d\n",
	     ceph_present_inode(inode), ceph_vinop(inode), inode,
	     !!(inode->i_state & I_NEW));
	return inode;
}

/*
 * get/construct snapdir inode for a given directory
 */
struct inode *ceph_get_snapdir(struct inode *parent)
{
	struct ceph_vino vino = {
		.ino = ceph_ino(parent),
		.snap = CEPH_SNAPDIR,
	};
	struct inode *inode = ceph_get_inode(parent->i_sb, vino);
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (IS_ERR(inode))
		return inode;

	if (!S_ISDIR(parent->i_mode)) {
		pr_warn_once("bad snapdir parent type (mode=0%o)\n",
			     parent->i_mode);
		goto err;
	}

	if (!(inode->i_state & I_NEW) && !S_ISDIR(inode->i_mode)) {
		pr_warn_once("bad snapdir inode type (mode=0%o)\n",
			     inode->i_mode);
		goto err;
	}

	inode->i_mode = parent->i_mode;
	inode->i_uid = parent->i_uid;
	inode->i_gid = parent->i_gid;
	inode->i_mtime = parent->i_mtime;
	inode->i_ctime = parent->i_ctime;
	inode->i_atime = parent->i_atime;
	ci->i_rbytes = 0;
	ci->i_btime = ceph_inode(parent)->i_btime;

	if (inode->i_state & I_NEW) {
		inode->i_op = &ceph_snapdir_iops;
		inode->i_fop = &ceph_snapdir_fops;
		ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
		unlock_new_inode(inode);
	}

	return inode;
err:
	/* don't leak the reference taken by ceph_get_inode() above */
	if (inode->i_state & I_NEW)
		discard_new_inode(inode);
	else
		iput(inode);
	return ERR_PTR(-ENOTDIR);
}

const struct inode_operations ceph_file_iops = {
	.permission = ceph_permission,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.listxattr = ceph_listxattr,
	.get_acl = ceph_get_acl,
	.set_acl = ceph_set_acl,
};

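/*
 * Usage sketch for the helpers above (illustrative only): a caller
 * holding a ceph_vino decoded from an MDS reply would typically do
 *
 *	struct ceph_vino vino = { .ino = ino, .snap = CEPH_NOSNAP };
 *	struct inode *inode = ceph_get_inode(sb, vino);
 *
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *	if (inode->i_state & I_NEW)
 *		... fill the inode, then unlock_new_inode(inode) ...
 *
 * which is the pattern followed by ceph_fill_trace() and the readdir
 * prepopulate code later in this file.
 */
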
/*
 * We use a 'frag tree' to keep track of the MDS's directory fragments
 * for a given inode (usually there is just a single fragment).  We
 * need to know when a child frag is delegated to a new MDS, or when
 * it is flagged as replicated, so we can direct our requests
 * accordingly.
 */

/*
 * find/create a frag in the tree
 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
						    u32 f)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_frag *frag;
	int c;

	p = &ci->i_fragtree.rb_node;
	while (*p) {
		parent = *p;
		frag = rb_entry(parent, struct ceph_inode_frag, node);
		c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return frag;
	}

	frag = kmalloc(sizeof(*frag), GFP_NOFS);
	if (!frag)
		return ERR_PTR(-ENOMEM);

	frag->frag = f;
	frag->split_by = 0;
	frag->mds = -1;
	frag->ndist = 0;

	rb_link_node(&frag->node, parent, p);
	rb_insert_color(&frag->node, &ci->i_fragtree);

	dout("get_or_create_frag added %llx.%llx frag %x\n",
	     ceph_vinop(&ci->vfs_inode), f);
	return frag;
}

/*
 * find a specific frag @f
 */
struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
{
	struct rb_node *n = ci->i_fragtree.rb_node;

	while (n) {
		struct ceph_inode_frag *frag =
			rb_entry(n, struct ceph_inode_frag, node);
		int c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			n = n->rb_left;
		else if (c > 0)
			n = n->rb_right;
		else
			return frag;
	}
	return NULL;
}

/*
 * Choose frag containing the given value @v.  If @pfrag is
 * specified, copy the frag delegation info to the caller if
 * it is present.
 */
static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
			      struct ceph_inode_frag *pfrag, int *found)
{
	u32 t = ceph_frag_make(0, 0);
	struct ceph_inode_frag *frag;
	unsigned nway, i;
	u32 n;

	if (found)
		*found = 0;

	while (1) {
		WARN_ON(!ceph_frag_contains_value(t, v));
		frag = __ceph_find_frag(ci, t);
		if (!frag)
			break; /* t is a leaf */
		if (frag->split_by == 0) {
			if (pfrag)
				memcpy(pfrag, frag, sizeof(*pfrag));
			if (found)
				*found = 1;
			break;
		}

		/* choose child */
		nway = 1 << frag->split_by;
		dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
		     frag->split_by, nway);
		for (i = 0; i < nway; i++) {
			n = ceph_frag_make_child(t, frag->split_by, i);
			if (ceph_frag_contains_value(n, v)) {
				t = n;
				break;
			}
		}
		BUG_ON(i == nway);
	}
	dout("choose_frag(%x) = %x\n", v, t);

	return t;
}

u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
		     struct ceph_inode_frag *pfrag, int *found)
{
	u32 ret;
	mutex_lock(&ci->i_fragtree_mutex);
	ret = __ceph_choose_frag(ci, v, pfrag, found);
	mutex_unlock(&ci->i_fragtree_mutex);
	return ret;
}
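/*
 * Worked example of the walk above (illustrative; it assumes the
 * usual frag encoding in which a frag is "bits/value" and contains
 * every hash whose top 'bits' bits equal 'value'): suppose the root
 * 0/0 is split by 1, and its child 1/1 is split by 1 again.  For a
 * hash value whose top two bits are 10 (binary), __ceph_choose_frag()
 * starts at 0/0, descends to the child 1/1 (it contains the value),
 * then to 2/10; no split is recorded for 2/10, so it is returned as
 * the leaf frag.
 */
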
/*
 * Process dirfrag (delegation) info from the mds.  Include leaf
 * fragment in tree ONLY if ndist > 0.  Otherwise, only
 * branches/splits are included in i_fragtree.
 */
static int ceph_fill_dirfrag(struct inode *inode,
			     struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	u32 id = le32_to_cpu(dirinfo->frag);
	int mds = le32_to_cpu(dirinfo->auth);
	int ndist = le32_to_cpu(dirinfo->ndist);
	int diri_auth = -1;
	int i;
	int err = 0;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_auth_cap)
		diri_auth = ci->i_auth_cap->mds;
	spin_unlock(&ci->i_ceph_lock);

	if (mds == -1) /* CDIR_AUTH_PARENT */
		mds = diri_auth;

	mutex_lock(&ci->i_fragtree_mutex);
	if (ndist == 0 && mds == diri_auth) {
		/* no delegation info needed. */
		frag = __ceph_find_frag(ci, id);
		if (!frag)
			goto out;
		if (frag->split_by == 0) {
			/* tree leaf, remove */
			dout("fill_dirfrag removed %llx.%llx frag %x"
			     " (no ref)\n", ceph_vinop(inode), id);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
		} else {
			/* tree branch, keep and clear */
			dout("fill_dirfrag cleared %llx.%llx frag %x"
			     " referral\n", ceph_vinop(inode), id);
			frag->mds = -1;
			frag->ndist = 0;
		}
		goto out;
	}

	/* find/add this frag to store mds delegation info */
	frag = __get_or_create_frag(ci, id);
	if (IS_ERR(frag)) {
		/* this is not the end of the world; we can continue
		   with bad/inaccurate delegation info */
		pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
		       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
		err = -ENOMEM;
		goto out;
	}

	frag->mds = mds;
	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
	for (i = 0; i < frag->ndist; i++)
		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
	dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
	     ceph_vinop(inode), frag->frag, frag->ndist);

out:
	mutex_unlock(&ci->i_fragtree_mutex);
	return err;
}

static int frag_tree_split_cmp(const void *l, const void *r)
{
	struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l;
	struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r;
	return ceph_frag_compare(le32_to_cpu(ls->frag),
				 le32_to_cpu(rs->frag));
}

static bool is_frag_child(u32 f, struct ceph_inode_frag *frag)
{
	if (!frag)
		return f == ceph_frag_make(0, 0);
	if (ceph_frag_bits(f) != ceph_frag_bits(frag->frag) + frag->split_by)
		return false;
	return ceph_frag_contains_value(frag->frag, ceph_frag_value(f));
}
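/*
 * Illustrative check (same encoding assumption as the example above):
 * if @frag is 0/0 with split_by == 1, is_frag_child() accepts exactly
 * the one-bit frags 1/0 and 1/1; a deeper frag such as 2/10 fails the
 * bits comparison and is not a direct child.
 */
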
static int ceph_fill_fragtree(struct inode *inode,
			      struct ceph_frag_tree_head *fragtree,
			      struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag, *prev_frag = NULL;
	struct rb_node *rb_node;
	unsigned i, split_by, nsplits;
	u32 id;
	bool update = false;

	mutex_lock(&ci->i_fragtree_mutex);
	nsplits = le32_to_cpu(fragtree->nsplits);
	if (nsplits != ci->i_fragtree_nsplits) {
		update = true;
	} else if (nsplits) {
		i = prandom_u32() % nsplits;
		id = le32_to_cpu(fragtree->splits[i].frag);
		if (!__ceph_find_frag(ci, id))
			update = true;
	} else if (!RB_EMPTY_ROOT(&ci->i_fragtree)) {
		rb_node = rb_first(&ci->i_fragtree);
		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
		if (frag->frag != ceph_frag_make(0, 0) || rb_next(rb_node))
			update = true;
	}
	if (!update && dirinfo) {
		id = le32_to_cpu(dirinfo->frag);
		if (id != __ceph_choose_frag(ci, id, NULL, NULL))
			update = true;
	}
	if (!update)
		goto out_unlock;

	if (nsplits > 1) {
		sort(fragtree->splits, nsplits, sizeof(fragtree->splits[0]),
		     frag_tree_split_cmp, NULL);
	}

	dout("fill_fragtree %llx.%llx\n", ceph_vinop(inode));
	rb_node = rb_first(&ci->i_fragtree);
	for (i = 0; i < nsplits; i++) {
		id = le32_to_cpu(fragtree->splits[i].frag);
		split_by = le32_to_cpu(fragtree->splits[i].by);
		if (split_by == 0 || ceph_frag_bits(id) + split_by > 24) {
			pr_err("fill_fragtree %llx.%llx invalid split %d/%u, "
			       "frag %x split by %d\n", ceph_vinop(inode),
			       i, nsplits, id, split_by);
			continue;
		}
		frag = NULL;
		while (rb_node) {
			frag = rb_entry(rb_node, struct ceph_inode_frag, node);
			if (ceph_frag_compare(frag->frag, id) >= 0) {
				if (frag->frag != id)
					frag = NULL;
				else
					rb_node = rb_next(rb_node);
				break;
			}
			rb_node = rb_next(rb_node);
			/* delete stale split/leaf node */
			if (frag->split_by > 0 ||
			    !is_frag_child(frag->frag, prev_frag)) {
				rb_erase(&frag->node, &ci->i_fragtree);
				if (frag->split_by > 0)
					ci->i_fragtree_nsplits--;
				kfree(frag);
			}
			frag = NULL;
		}
		if (!frag) {
			frag = __get_or_create_frag(ci, id);
			if (IS_ERR(frag))
				continue;
		}
		if (frag->split_by == 0)
			ci->i_fragtree_nsplits++;
		frag->split_by = split_by;
		dout(" frag %x split by %d\n", frag->frag, frag->split_by);
		prev_frag = frag;
	}
	while (rb_node) {
		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
		rb_node = rb_next(rb_node);
		/* delete stale split/leaf node */
		if (frag->split_by > 0 ||
		    !is_frag_child(frag->frag, prev_frag)) {
			rb_erase(&frag->node, &ci->i_fragtree);
			if (frag->split_by > 0)
				ci->i_fragtree_nsplits--;
			kfree(frag);
		}
	}
out_unlock:
	mutex_unlock(&ci->i_fragtree_mutex);
	return 0;
}

/*
 * initialize a newly allocated inode.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
	struct ceph_inode_info *ci;
	int i;

	ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
	if (!ci)
		return NULL;

	dout("alloc_inode %p\n", &ci->vfs_inode);

	spin_lock_init(&ci->i_ceph_lock);

	ci->i_version = 0;
	ci->i_inline_version = 0;
	ci->i_time_warp_seq = 0;
	ci->i_ceph_flags = 0;
	atomic64_set(&ci->i_ordered_count, 1);
	atomic64_set(&ci->i_release_count, 1);
	atomic64_set(&ci->i_complete_seq[0], 0);
	atomic64_set(&ci->i_complete_seq[1], 0);
	ci->i_symlink = NULL;

	ci->i_max_bytes = 0;
	ci->i_max_files = 0;

	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
	memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout));
	RCU_INIT_POINTER(ci->i_layout.pool_ns, NULL);

	ci->i_fragtree = RB_ROOT;
	mutex_init(&ci->i_fragtree_mutex);

	ci->i_xattrs.blob = NULL;
	ci->i_xattrs.prealloc_blob = NULL;
	ci->i_xattrs.dirty = false;
	ci->i_xattrs.index = RB_ROOT;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.version = 0;
	ci->i_xattrs.index_version = 0;

	ci->i_caps = RB_ROOT;
	ci->i_auth_cap = NULL;
	ci->i_dirty_caps = 0;
	ci->i_flushing_caps = 0;
	INIT_LIST_HEAD(&ci->i_dirty_item);
	INIT_LIST_HEAD(&ci->i_flushing_item);
	ci->i_prealloc_cap_flush = NULL;
	INIT_LIST_HEAD(&ci->i_cap_flush_list);
	init_waitqueue_head(&ci->i_cap_wq);
	ci->i_hold_caps_max = 0;
	INIT_LIST_HEAD(&ci->i_cap_delay_list);
	INIT_LIST_HEAD(&ci->i_cap_snaps);
	ci->i_head_snapc = NULL;
	ci->i_snap_caps = 0;

	ci->i_last_rd = ci->i_last_wr = jiffies - 3600 * HZ;
	for (i = 0; i < CEPH_FILE_MODE_BITS; i++)
		ci->i_nr_by_mode[i] = 0;

	mutex_init(&ci->i_truncate_mutex);
	ci->i_truncate_seq = 0;
	ci->i_truncate_size = 0;
	ci->i_truncate_pending = 0;

	ci->i_max_size = 0;
	ci->i_reported_size = 0;
	ci->i_wanted_max_size = 0;
	ci->i_requested_max_size = 0;

	ci->i_pin_ref = 0;
	ci->i_rd_ref = 0;
	ci->i_rdcache_ref = 0;
	ci->i_wr_ref = 0;
	ci->i_wb_ref = 0;
	ci->i_fx_ref = 0;
	ci->i_wrbuffer_ref = 0;
	ci->i_wrbuffer_ref_head = 0;
	atomic_set(&ci->i_filelock_ref, 0);
	atomic_set(&ci->i_shared_gen, 1);
	ci->i_rdcache_gen = 0;
	ci->i_rdcache_revoking = 0;

	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
	INIT_LIST_HEAD(&ci->i_unsafe_iops);
	spin_lock_init(&ci->i_unsafe_lock);

	ci->i_snap_realm = NULL;
	INIT_LIST_HEAD(&ci->i_snap_realm_item);
	INIT_LIST_HEAD(&ci->i_snap_flush_item);

	INIT_WORK(&ci->i_work, ceph_inode_work);
	ci->i_work_mask = 0;
	memset(&ci->i_btime, '\0', sizeof(ci->i_btime));

	ceph_fscache_inode_init(ci);

	ci->i_meta_err = 0;

	return &ci->vfs_inode;
}

void ceph_free_inode(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	kfree(ci->i_symlink);
	kmem_cache_free(ceph_inode_cachep, ci);
}
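/*
 * A sketch of how these lifecycle helpers are wired up; the real
 * table lives in super.c:
 *
 *	static const struct super_operations ceph_super_ops = {
 *		.alloc_inode	= ceph_alloc_inode,
 *		.free_inode	= ceph_free_inode,
 *		.evict_inode	= ceph_evict_inode,
 *		...
 *	};
 */
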
void ceph_evict_inode(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
	struct ceph_inode_frag *frag;
	struct rb_node *n;

	dout("evict_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));

	percpu_counter_dec(&mdsc->metric.total_inodes);

	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);

	ceph_fscache_unregister_inode_cookie(ci);

	__ceph_remove_caps(ci);

	if (__ceph_has_any_quota(ci))
		ceph_adjust_quota_realms_count(inode, false);

	/*
	 * we may still have a snap_realm reference if there are stray
	 * caps in i_snap_caps.
	 */
	if (ci->i_snap_realm) {
		if (ceph_snap(inode) == CEPH_NOSNAP) {
			dout(" dropping residual ref to snap realm %p\n",
			     ci->i_snap_realm);
			ceph_change_snap_realm(inode, NULL);
		} else {
			ceph_put_snapid_map(mdsc, ci->i_snapid_map);
			ci->i_snap_realm = NULL;
		}
	}

	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
		frag = rb_entry(n, struct ceph_inode_frag, node);
		rb_erase(n, &ci->i_fragtree);
		kfree(frag);
	}
	ci->i_fragtree_nsplits = 0;

	__ceph_destroy_xattrs(ci);
	if (ci->i_xattrs.blob)
		ceph_buffer_put(ci->i_xattrs.blob);
	if (ci->i_xattrs.prealloc_blob)
		ceph_buffer_put(ci->i_xattrs.prealloc_blob);

	ceph_put_string(rcu_dereference_raw(ci->i_layout.pool_ns));
	ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns));
}

static inline blkcnt_t calc_inode_blocks(u64 size)
{
	return (size + (1<<9) - 1) >> 9;
}

/*
 * Helpers to fill in size, ctime, mtime, and atime.  We have to be
 * careful because either the client or MDS may have more up to date
 * info, depending on which capabilities are held, and whether
 * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
 * and size are monotonically increasing, except when utimes() or
 * truncate() increments the corresponding _seq values.)
 */
int ceph_fill_file_size(struct inode *inode, int issued,
			u32 truncate_seq, u64 truncate_size, u64 size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int queue_trunc = 0;
	loff_t isize = i_size_read(inode);

	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
	    (truncate_seq == ci->i_truncate_seq && size > isize)) {
		dout("size %lld -> %llu\n", isize, size);
		if (size > 0 && S_ISDIR(inode->i_mode)) {
			pr_err("fill_file_size non-zero size for directory\n");
			size = 0;
		}
		i_size_write(inode, size);
		inode->i_blocks = calc_inode_blocks(size);
		ci->i_reported_size = size;
		if (truncate_seq != ci->i_truncate_seq) {
			dout("truncate_seq %u -> %u\n",
			     ci->i_truncate_seq, truncate_seq);
			ci->i_truncate_seq = truncate_seq;

			/* the MDS should have revoked these caps */
			WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
					       CEPH_CAP_FILE_RD |
					       CEPH_CAP_FILE_WR |
					       CEPH_CAP_FILE_LAZYIO));
			/*
			 * If we hold relevant caps, or in the case where we're
			 * not the only client referencing this file and we
			 * don't hold those caps, then we need to check whether
			 * the file is either opened or mmaped
			 */
			if ((issued & (CEPH_CAP_FILE_CACHE|
				       CEPH_CAP_FILE_BUFFER)) ||
			    mapping_mapped(inode->i_mapping) ||
			    __ceph_is_file_opened(ci)) {
				ci->i_truncate_pending++;
				queue_trunc = 1;
			}
		}
	}
	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
	    ci->i_truncate_size != truncate_size) {
		dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
		     truncate_size);
		ci->i_truncate_size = truncate_size;
	}

	if (queue_trunc)
		ceph_fscache_invalidate(inode);

	return queue_trunc;
}
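/*
 * Illustrative scenario for the above: our cached i_size is 100 and
 * the MDS reports truncate_seq bumped from 3 to 4 with size 0 (some
 * other client truncated the file).  We adopt size 0 immediately,
 * and if the file is open or mmapped here we bump i_truncate_pending
 * and return 1 so the caller queues an async vmtruncate to drop the
 * now-stale pages.
 */
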
void ceph_fill_file_time(struct inode *inode, int issued,
			 u64 time_warp_seq, struct timespec64 *ctime,
			 struct timespec64 *mtime, struct timespec64 *atime)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int warn = 0;

	if (issued & (CEPH_CAP_FILE_EXCL|
		      CEPH_CAP_FILE_WR|
		      CEPH_CAP_FILE_BUFFER|
		      CEPH_CAP_AUTH_EXCL|
		      CEPH_CAP_XATTR_EXCL)) {
		if (ci->i_version == 0 ||
		    timespec64_compare(ctime, &inode->i_ctime) > 0) {
			dout("ctime %lld.%09ld -> %lld.%09ld inc w/ cap\n",
			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
			     ctime->tv_sec, ctime->tv_nsec);
			inode->i_ctime = *ctime;
		}
		if (ci->i_version == 0 ||
		    ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
			/* the MDS did a utimes() */
			dout("mtime %lld.%09ld -> %lld.%09ld "
			     "tw %d -> %d\n",
			     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
			     mtime->tv_sec, mtime->tv_nsec,
			     ci->i_time_warp_seq, (int)time_warp_seq);

			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else if (time_warp_seq == ci->i_time_warp_seq) {
			/* nobody did utimes(); take the max */
			if (timespec64_compare(mtime, &inode->i_mtime) > 0) {
				dout("mtime %lld.%09ld -> %lld.%09ld inc\n",
				     inode->i_mtime.tv_sec,
				     inode->i_mtime.tv_nsec,
				     mtime->tv_sec, mtime->tv_nsec);
				inode->i_mtime = *mtime;
			}
			if (timespec64_compare(atime, &inode->i_atime) > 0) {
				dout("atime %lld.%09ld -> %lld.%09ld inc\n",
				     inode->i_atime.tv_sec,
				     inode->i_atime.tv_nsec,
				     atime->tv_sec, atime->tv_nsec);
				inode->i_atime = *atime;
			}
		} else if (issued & CEPH_CAP_FILE_EXCL) {
			/* we did a utimes(); ignore mds values */
		} else {
			warn = 1;
		}
	} else {
		/* we have no write|excl caps; whatever the MDS says is true */
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
			inode->i_ctime = *ctime;
			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else {
			warn = 1;
		}
	}
	if (warn) /* time_warp_seq shouldn't go backwards */
		dout("%p mds time_warp_seq %llu < %u\n",
		     inode, time_warp_seq, ci->i_time_warp_seq);
}
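/*
 * Illustrative timeline for the above: a client holding Fw writes and
 * advances mtime locally; the MDS then reports an older mtime with an
 * unchanged time_warp_seq, so we keep our (larger) value.  If some
 * client instead did a utimes(), the MDS bumps time_warp_seq and its
 * mtime/atime win regardless of the local values.
 */
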
/*
 * Populate an inode based on info from mds.  May be called on new or
 * existing inodes.
 */
int ceph_fill_inode(struct inode *inode, struct page *locked_page,
		    struct ceph_mds_reply_info_in *iinfo,
		    struct ceph_mds_reply_dirfrag *dirinfo,
		    struct ceph_mds_session *session, int cap_fmode,
		    struct ceph_cap_reservation *caps_reservation)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
	struct ceph_mds_reply_inode *info = iinfo->in;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int issued, new_issued, info_caps;
	struct timespec64 mtime, atime, ctime;
	struct ceph_buffer *xattr_blob = NULL;
	struct ceph_buffer *old_blob = NULL;
	struct ceph_string *pool_ns = NULL;
	struct ceph_cap *new_cap = NULL;
	int err = 0;
	bool wake = false;
	bool queue_trunc = false;
	bool new_version = false;
	bool fill_inline = false;
	umode_t mode = le32_to_cpu(info->mode);
	dev_t rdev = le32_to_cpu(info->rdev);

	lockdep_assert_held(&mdsc->snap_rwsem);

	dout("%s %p ino %llx.%llx v %llu had %llu\n", __func__,
	     inode, ceph_vinop(inode), le64_to_cpu(info->version),
	     ci->i_version);

	/* Once I_NEW is cleared, we can't change type or dev numbers */
	if (inode->i_state & I_NEW) {
		inode->i_mode = mode;
	} else {
		if (inode_wrong_type(inode, mode)) {
			pr_warn_once("inode type changed! (ino %llx.%llx is 0%o, mds says 0%o)\n",
				     ceph_vinop(inode), inode->i_mode, mode);
			return -ESTALE;
		}

		if ((S_ISCHR(mode) || S_ISBLK(mode)) && inode->i_rdev != rdev) {
			pr_warn_once("dev inode rdev changed! (ino %llx.%llx is %u:%u, mds says %u:%u)\n",
				     ceph_vinop(inode), MAJOR(inode->i_rdev),
				     MINOR(inode->i_rdev), MAJOR(rdev),
				     MINOR(rdev));
			return -ESTALE;
		}
	}

	info_caps = le32_to_cpu(info->cap.caps);

	/* prealloc new cap struct */
	if (info_caps && ceph_snap(inode) == CEPH_NOSNAP) {
		new_cap = ceph_get_cap(mdsc, caps_reservation);
		if (!new_cap)
			return -ENOMEM;
	}

	/*
	 * prealloc xattr data, if it looks like we'll need it.  only
	 * if len > 4 (meaning there are actually xattrs; the first 4
	 * bytes are the xattr count).
	 */
	if (iinfo->xattr_len > 4) {
		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
		if (!xattr_blob)
			pr_err("%s ENOMEM xattr blob %d bytes\n", __func__,
			       iinfo->xattr_len);
	}

	if (iinfo->pool_ns_len > 0)
		pool_ns = ceph_find_or_create_string(iinfo->pool_ns_data,
						     iinfo->pool_ns_len);

	if (ceph_snap(inode) != CEPH_NOSNAP && !ci->i_snapid_map)
		ci->i_snapid_map = ceph_get_snapid_map(mdsc, ceph_snap(inode));

	spin_lock(&ci->i_ceph_lock);

	/*
	 * provided version will be odd if inode value is projected,
	 * even if stable.  skip the update if we have newer stable
	 * info (ours>=theirs, e.g. due to racing mds replies), unless
	 * we are getting projected (unstable) info (in which case the
	 * version is odd, and we want ours>theirs).
	 *   us   them
	 *   2    2     skip
	 *   3    2     skip
	 *   3    3     update
	 */
	if (ci->i_version == 0 ||
	    ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
	     le64_to_cpu(info->version) > (ci->i_version & ~1)))
		new_version = true;

	/* Update change_attribute */
	inode_set_max_iversion_raw(inode, iinfo->change_attr);

	__ceph_caps_issued(ci, &issued);
	issued |= __ceph_caps_dirty(ci);
	new_issued = ~issued & info_caps;

	/* directories have fl_stripe_unit set to zero */
	if (le32_to_cpu(info->layout.fl_stripe_unit))
		inode->i_blkbits =
			fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
	else
		inode->i_blkbits = CEPH_BLOCK_SHIFT;

	__ceph_update_quota(ci, iinfo->max_bytes, iinfo->max_files);

	if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) &&
	    (issued & CEPH_CAP_AUTH_EXCL) == 0) {
		inode->i_mode = mode;
		inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
		inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kgid(&init_user_ns, inode->i_gid));
		ceph_decode_timespec64(&ci->i_btime, &iinfo->btime);
		ceph_decode_timespec64(&ci->i_snap_btime, &iinfo->snap_btime);
	}

	if ((new_version || (new_issued & CEPH_CAP_LINK_SHARED)) &&
	    (issued & CEPH_CAP_LINK_EXCL) == 0)
		set_nlink(inode, le32_to_cpu(info->nlink));

	if (new_version || (new_issued & CEPH_CAP_ANY_RD)) {
		/* be careful with mtime, atime, size */
		ceph_decode_timespec64(&atime, &info->atime);
		ceph_decode_timespec64(&mtime, &info->mtime);
		ceph_decode_timespec64(&ctime, &info->ctime);
		ceph_fill_file_time(inode, issued,
				    le32_to_cpu(info->time_warp_seq),
				    &ctime, &mtime, &atime);
	}

	if (new_version || (info_caps & CEPH_CAP_FILE_SHARED)) {
		ci->i_files = le64_to_cpu(info->files);
		ci->i_subdirs = le64_to_cpu(info->subdirs);
	}

	if (new_version ||
	    (new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
		s64 old_pool = ci->i_layout.pool_id;
		struct ceph_string *old_ns;

		ceph_file_layout_from_legacy(&ci->i_layout, &info->layout);
		old_ns = rcu_dereference_protected(ci->i_layout.pool_ns,
					lockdep_is_held(&ci->i_ceph_lock));
		rcu_assign_pointer(ci->i_layout.pool_ns, pool_ns);

		if (ci->i_layout.pool_id != old_pool || pool_ns != old_ns)
			ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;

		pool_ns = old_ns;

		queue_trunc = ceph_fill_file_size(inode, issued,
					le32_to_cpu(info->truncate_seq),
					le64_to_cpu(info->truncate_size),
					le64_to_cpu(info->size));
		/* only update max_size on auth cap */
		if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
		    ci->i_max_size != le64_to_cpu(info->max_size)) {
			dout("max_size %lld -> %llu\n", ci->i_max_size,
			     le64_to_cpu(info->max_size));
			ci->i_max_size = le64_to_cpu(info->max_size);
		}
	}

	/* layout and rstat are not tracked by capability, update them if
	 * the inode info is from auth mds */
	if (new_version || (info->cap.flags & CEPH_CAP_FLAG_AUTH)) {
		if (S_ISDIR(inode->i_mode)) {
			ci->i_dir_layout = iinfo->dir_layout;
			ci->i_rbytes = le64_to_cpu(info->rbytes);
			ci->i_rfiles = le64_to_cpu(info->rfiles);
			ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
			ci->i_dir_pin = iinfo->dir_pin;
			ci->i_rsnaps = iinfo->rsnaps;
			ceph_decode_timespec64(&ci->i_rctime, &info->rctime);
		}
	}

	/* xattrs */
	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
	if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) &&
	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
		if (ci->i_xattrs.blob)
			old_blob = ci->i_xattrs.blob;
		ci->i_xattrs.blob = xattr_blob;
		if (xattr_blob)
			memcpy(ci->i_xattrs.blob->vec.iov_base,
			       iinfo->xattr_data, iinfo->xattr_len);
		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
		ceph_forget_all_cached_acls(inode);
		ceph_security_invalidate_secctx(inode);
		xattr_blob = NULL;
	}

	/* finally update i_version */
	if (le64_to_cpu(info->version) > ci->i_version)
		ci->i_version = le64_to_cpu(info->version);

	inode->i_mapping->a_ops = &ceph_aops;

	switch (inode->i_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFBLK:
	case S_IFCHR:
	case S_IFSOCK:
		inode->i_blkbits = PAGE_SHIFT;
		init_special_inode(inode, inode->i_mode, rdev);
		inode->i_op = &ceph_file_iops;
		break;
	case S_IFREG:
		inode->i_op = &ceph_file_iops;
		inode->i_fop = &ceph_file_fops;
		break;
	case S_IFLNK:
		inode->i_op = &ceph_symlink_iops;
		if (!ci->i_symlink) {
			u32 symlen = iinfo->symlink_len;
			char *sym;

			spin_unlock(&ci->i_ceph_lock);

			if (symlen != i_size_read(inode)) {
				pr_err("%s %llx.%llx BAD symlink "
				       "size %lld\n", __func__,
				       ceph_vinop(inode),
				       i_size_read(inode));
				i_size_write(inode, symlen);
				inode->i_blocks = calc_inode_blocks(symlen);
			}

			err = -ENOMEM;
			sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
			if (!sym)
				goto out;

			spin_lock(&ci->i_ceph_lock);
			if (!ci->i_symlink)
				ci->i_symlink = sym;
			else
				kfree(sym); /* lost a race */
		}
		inode->i_link = ci->i_symlink;
		break;
	case S_IFDIR:
		inode->i_op = &ceph_dir_iops;
		inode->i_fop = &ceph_dir_fops;
		break;
	default:
		pr_err("%s %llx.%llx BAD mode 0%o\n", __func__,
		       ceph_vinop(inode), inode->i_mode);
	}

	/* were we issued a capability? */
	if (info_caps) {
		if (ceph_snap(inode) == CEPH_NOSNAP) {
			ceph_add_cap(inode, session,
				     le64_to_cpu(info->cap.cap_id),
				     info_caps,
				     le32_to_cpu(info->cap.wanted),
				     le32_to_cpu(info->cap.seq),
				     le32_to_cpu(info->cap.mseq),
				     le64_to_cpu(info->cap.realm),
				     info->cap.flags, &new_cap);

			/* set dir completion flag? */
			if (S_ISDIR(inode->i_mode) &&
			    ci->i_files == 0 && ci->i_subdirs == 0 &&
			    (info_caps & CEPH_CAP_FILE_SHARED) &&
			    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
			    !__ceph_dir_is_complete(ci)) {
				dout(" marking %p complete (empty)\n", inode);
				i_size_write(inode, 0);
				__ceph_dir_set_complete(ci,
					atomic64_read(&ci->i_release_count),
					atomic64_read(&ci->i_ordered_count));
			}

			wake = true;
		} else {
			dout(" %p got snap_caps %s\n", inode,
			     ceph_cap_string(info_caps));
			ci->i_snap_caps |= info_caps;
		}
	}

	if (iinfo->inline_version > 0 &&
	    iinfo->inline_version >= ci->i_inline_version) {
		int cache_caps = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
		ci->i_inline_version = iinfo->inline_version;
		if (ci->i_inline_version != CEPH_INLINE_NONE &&
		    (locked_page || (info_caps & cache_caps)))
			fill_inline = true;
	}

	if (cap_fmode >= 0) {
		if (!info_caps)
			pr_warn("mds issued no caps on %llx.%llx\n",
				ceph_vinop(inode));
		__ceph_touch_fmode(ci, mdsc, cap_fmode);
	}

	spin_unlock(&ci->i_ceph_lock);

	if (fill_inline)
		ceph_fill_inline_data(inode, locked_page,
				      iinfo->inline_data, iinfo->inline_len);

	if (wake)
		wake_up_all(&ci->i_cap_wq);

	/* queue truncate if we saw i_size decrease */
	if (queue_trunc)
		ceph_queue_vmtruncate(inode);

	/* populate frag tree */
	if (S_ISDIR(inode->i_mode))
		ceph_fill_fragtree(inode, &info->fragtree, dirinfo);

	/* update delegation info? */
	if (dirinfo)
		ceph_fill_dirfrag(inode, dirinfo);

	err = 0;
out:
	if (new_cap)
		ceph_put_cap(mdsc, new_cap);
	ceph_buffer_put(old_blob);
	ceph_buffer_put(xattr_blob);
	ceph_put_string(pool_ns);
	return err;
}
/*
 * caller should hold session s_mutex and dentry->d_lock.
 */
static void __update_dentry_lease(struct inode *dir, struct dentry *dentry,
				  struct ceph_mds_reply_lease *lease,
				  struct ceph_mds_session *session,
				  unsigned long from_time,
				  struct ceph_mds_session **old_lease_session)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	unsigned mask = le16_to_cpu(lease->mask);
	long unsigned duration = le32_to_cpu(lease->duration_ms);
	long unsigned ttl = from_time + (duration * HZ) / 1000;
	long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;

	dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
	     dentry, duration, ttl);

	/* only track leases on regular dentries */
	if (ceph_snap(dir) != CEPH_NOSNAP)
		return;

	if (mask & CEPH_LEASE_PRIMARY_LINK)
		di->flags |= CEPH_DENTRY_PRIMARY_LINK;
	else
		di->flags &= ~CEPH_DENTRY_PRIMARY_LINK;

	di->lease_shared_gen = atomic_read(&ceph_inode(dir)->i_shared_gen);
	if (!(mask & CEPH_LEASE_VALID)) {
		__ceph_dentry_dir_lease_touch(di);
		return;
	}

	if (di->lease_gen == atomic_read(&session->s_cap_gen) &&
	    time_before(ttl, di->time))
		return; /* we already have a newer lease. */

	if (di->lease_session && di->lease_session != session) {
		*old_lease_session = di->lease_session;
		di->lease_session = NULL;
	}

	if (!di->lease_session)
		di->lease_session = ceph_get_mds_session(session);
	di->lease_gen = atomic_read(&session->s_cap_gen);
	di->lease_seq = le32_to_cpu(lease->seq);
	di->lease_renew_after = half_ttl;
	di->lease_renew_from = 0;
	di->time = ttl;

	__ceph_dentry_lease_touch(di);
}

static inline void update_dentry_lease(struct inode *dir, struct dentry *dentry,
				struct ceph_mds_reply_lease *lease,
				struct ceph_mds_session *session,
				unsigned long from_time)
{
	struct ceph_mds_session *old_lease_session = NULL;
	spin_lock(&dentry->d_lock);
	__update_dentry_lease(dir, dentry, lease, session, from_time,
			      &old_lease_session);
	spin_unlock(&dentry->d_lock);
	ceph_put_mds_session(old_lease_session);
}
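/*
 * Lease duration arithmetic, by way of example: with duration_ms ==
 * 30000, di->time (the ttl) lands 30 seconds' worth of jiffies after
 * from_time, and lease_renew_after is set to the 15-second midpoint,
 * past which a renewal may be requested.
 */
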
/*
 * update dentry lease without having parent inode locked
 */
static void update_dentry_lease_careful(struct dentry *dentry,
					struct ceph_mds_reply_lease *lease,
					struct ceph_mds_session *session,
					unsigned long from_time,
					char *dname, u32 dname_len,
					struct ceph_vino *pdvino,
					struct ceph_vino *ptvino)

{
	struct inode *dir;
	struct ceph_mds_session *old_lease_session = NULL;

	spin_lock(&dentry->d_lock);
	/* make sure dentry's name matches target */
	if (dentry->d_name.len != dname_len ||
	    memcmp(dentry->d_name.name, dname, dname_len))
		goto out_unlock;

	dir = d_inode(dentry->d_parent);
	/* make sure parent matches dvino */
	if (!ceph_ino_compare(dir, pdvino))
		goto out_unlock;

	/* make sure dentry's inode matches target. NULL ptvino means that
	 * we expect a negative dentry */
	if (ptvino) {
		if (d_really_is_negative(dentry))
			goto out_unlock;
		if (!ceph_ino_compare(d_inode(dentry), ptvino))
			goto out_unlock;
	} else {
		if (d_really_is_positive(dentry))
			goto out_unlock;
	}

	__update_dentry_lease(dir, dentry, lease, session,
			      from_time, &old_lease_session);
out_unlock:
	spin_unlock(&dentry->d_lock);
	ceph_put_mds_session(old_lease_session);
}

/*
 * splice a dentry to an inode.
 * caller must hold directory i_mutex for this to be safe.
 */
static int splice_dentry(struct dentry **pdn, struct inode *in)
{
	struct dentry *dn = *pdn;
	struct dentry *realdn;

	BUG_ON(d_inode(dn));

	if (S_ISDIR(in->i_mode)) {
		/* If inode is directory, d_splice_alias() below will remove
		 * 'realdn' from its origin parent.  We need to ensure that
		 * origin parent's readdir cache will not reference 'realdn'
		 */
		realdn = d_find_any_alias(in);
		if (realdn) {
			struct ceph_dentry_info *di = ceph_dentry(realdn);
			spin_lock(&realdn->d_lock);

			realdn->d_op->d_prune(realdn);

			di->time = jiffies;
			di->lease_shared_gen = 0;
			di->offset = 0;

			spin_unlock(&realdn->d_lock);
			dput(realdn);
		}
	}

	/* dn must be unhashed */
	if (!d_unhashed(dn))
		d_drop(dn);
	realdn = d_splice_alias(in, dn);
	if (IS_ERR(realdn)) {
		pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
		       PTR_ERR(realdn), dn, in, ceph_vinop(in));
		return PTR_ERR(realdn);
	}

	if (realdn) {
		dout("dn %p (%d) spliced with %p (%d) "
		     "inode %p ino %llx.%llx\n",
		     dn, d_count(dn),
		     realdn, d_count(realdn),
		     d_inode(realdn), ceph_vinop(d_inode(realdn)));
		dput(dn);
		*pdn = realdn;
	} else {
		BUG_ON(!ceph_dentry(dn));
		dout("dn %p attached to %p ino %llx.%llx\n",
		     dn, d_inode(dn), ceph_vinop(d_inode(dn)));
	}
	return 0;
}
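/*
 * Note on reference counting (a sketch of the contract, not new
 * behaviour): d_splice_alias() consumes the inode reference it is
 * handed, even on failure.  That is why the callers below take an
 * extra reference with ihold(in) immediately before calling
 * splice_dentry(), keeping their own reference to 'in' valid either
 * way.
 */
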
/*
 * Incorporate results into the local cache.  This is either just
 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
 * after a lookup).
 *
 * A reply may contain
 *         a directory inode along with a dentry.
 *   and/or a target inode
 *
 * Called with snap_rwsem (read).
 */
int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
{
	struct ceph_mds_session *session = req->r_session;
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct inode *in = NULL;
	struct ceph_vino tvino, dvino;
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	int err = 0;

	dout("fill_trace %p is_dentry %d is_target %d\n", req,
	     rinfo->head->is_dentry, rinfo->head->is_target);

	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
		dout("fill_trace reply is empty!\n");
		if (rinfo->head->result == 0 && req->r_parent)
			ceph_invalidate_dir_request(req);
		return 0;
	}

	if (rinfo->head->is_dentry) {
		struct inode *dir = req->r_parent;

		if (dir) {
			err = ceph_fill_inode(dir, NULL, &rinfo->diri,
					      rinfo->dirfrag, session, -1,
					      &req->r_caps_reservation);
			if (err < 0)
				goto done;
		} else {
			WARN_ON_ONCE(1);
		}

		if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME &&
		    test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
		    !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
			struct qstr dname;
			struct dentry *dn, *parent;

			BUG_ON(!rinfo->head->is_target);
			BUG_ON(req->r_dentry);

			parent = d_find_any_alias(dir);
			BUG_ON(!parent);

			dname.name = rinfo->dname;
			dname.len = rinfo->dname_len;
			dname.hash = full_name_hash(parent, dname.name, dname.len);
			tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
			tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
retry_lookup:
			dn = d_lookup(parent, &dname);
			dout("d_lookup on parent=%p name=%.*s got %p\n",
			     parent, dname.len, dname.name, dn);

			if (!dn) {
				dn = d_alloc(parent, &dname);
				dout("d_alloc %p '%.*s' = %p\n", parent,
				     dname.len, dname.name, dn);
				if (!dn) {
					dput(parent);
					err = -ENOMEM;
					goto done;
				}
				err = 0;
			} else if (d_really_is_positive(dn) &&
				   (ceph_ino(d_inode(dn)) != tvino.ino ||
				    ceph_snap(d_inode(dn)) != tvino.snap)) {
				dout(" dn %p points to wrong inode %p\n",
				     dn, d_inode(dn));
				ceph_dir_clear_ordered(dir);
				d_delete(dn);
				dput(dn);
				goto retry_lookup;
			}

			req->r_dentry = dn;
			dput(parent);
		}
	}

	if (rinfo->head->is_target) {
		/* Should be filled in by handle_reply */
		BUG_ON(!req->r_target_inode);

		in = req->r_target_inode;
		err = ceph_fill_inode(in, req->r_locked_page, &rinfo->targeti,
				      NULL, session,
				      (!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
				       !test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags) &&
				       rinfo->head->result == 0) ? req->r_fmode : -1,
				      &req->r_caps_reservation);
		if (err < 0) {
			pr_err("ceph_fill_inode badness %p %llx.%llx\n",
			       in, ceph_vinop(in));
			req->r_target_inode = NULL;
			if (in->i_state & I_NEW)
				discard_new_inode(in);
			else
				iput(in);
			goto done;
		}
		if (in->i_state & I_NEW)
			unlock_new_inode(in);
	}

	/*
	 * ignore null lease/binding on snapdir ENOENT, or else we
	 * will have trouble splicing in the virtual snapdir later
	 */
	if (rinfo->head->is_dentry &&
	    !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
	    test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
					       fsc->mount_options->snapdir_name,
					       req->r_dentry->d_name.len))) {
		/*
		 * lookup link rename   : null -> possibly existing inode
		 * mknod symlink mkdir  : null -> new inode
		 * unlink               : linked -> null
		 */
		struct inode *dir = req->r_parent;
		struct dentry *dn = req->r_dentry;
		bool have_dir_cap, have_lease;

		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(d_inode(dn->d_parent) != dir);

		dvino.ino = le64_to_cpu(rinfo->diri.in->ino);
		dvino.snap = le64_to_cpu(rinfo->diri.in->snapid);

		BUG_ON(ceph_ino(dir) != dvino.ino);
		BUG_ON(ceph_snap(dir) != dvino.snap);

		/* do we have a lease on the whole dir? */
		have_dir_cap =
			(le32_to_cpu(rinfo->diri.in->cap.caps) &
			 CEPH_CAP_FILE_SHARED);

		/* do we have a dn lease? */
		have_lease = have_dir_cap ||
			le32_to_cpu(rinfo->dlease->duration_ms);
		if (!have_lease)
			dout("fill_trace no dentry lease or dir cap\n");

		/* rename? */
		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
			struct inode *olddir = req->r_old_dentry_dir;
			BUG_ON(!olddir);

			dout(" src %p '%pd' dst %p '%pd'\n",
			     req->r_old_dentry,
			     req->r_old_dentry,
			     dn, dn);
			dout("fill_trace doing d_move %p -> %p\n",
			     req->r_old_dentry, dn);

			/* d_move screws up sibling dentries' offsets */
			ceph_dir_clear_ordered(dir);
			ceph_dir_clear_ordered(olddir);

			d_move(req->r_old_dentry, dn);
			dout(" src %p '%pd' dst %p '%pd'\n",
			     req->r_old_dentry,
			     req->r_old_dentry,
			     dn, dn);

			/* ensure target dentry is invalidated, despite
			   rehashing bug in vfs_rename_dir */
			ceph_invalidate_dentry_lease(dn);

			dout("dn %p gets new offset %lld\n", req->r_old_dentry,
			     ceph_dentry(req->r_old_dentry)->offset);

			/* swap r_dentry and r_old_dentry in case that
			 * splice_dentry() gets called later. This is safe
			 * because no other place will use them */
			req->r_dentry = req->r_old_dentry;
			req->r_old_dentry = dn;
			dn = req->r_dentry;
		}

		/* null dentry? */
		if (!rinfo->head->is_target) {
			dout("fill_trace null dentry\n");
			if (d_really_is_positive(dn)) {
				dout("d_delete %p\n", dn);
				ceph_dir_clear_ordered(dir);
				d_delete(dn);
			} else if (have_lease) {
				if (d_unhashed(dn))
					d_add(dn, NULL);
				update_dentry_lease(dir, dn,
						    rinfo->dlease, session,
						    req->r_request_started);
			}
			goto done;
		}

		/* attach proper inode */
		if (d_really_is_negative(dn)) {
			ceph_dir_clear_ordered(dir);
			ihold(in);
			err = splice_dentry(&req->r_dentry, in);
			if (err < 0)
				goto done;
			dn = req->r_dentry; /* may have spliced */
		} else if (d_really_is_positive(dn) && d_inode(dn) != in) {
			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
			     dn, d_inode(dn), ceph_vinop(d_inode(dn)),
			     ceph_vinop(in));
			d_invalidate(dn);
			have_lease = false;
		}

		if (have_lease) {
			update_dentry_lease(dir, dn,
					    rinfo->dlease, session,
					    req->r_request_started);
		}
		dout(" final dn %p\n", dn);
	} else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
		    req->r_op == CEPH_MDS_OP_MKSNAP) &&
		   test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
		   !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
		struct inode *dir = req->r_parent;

		/* fill out a snapdir LOOKUPSNAP dentry */
		BUG_ON(!dir);
		BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
		BUG_ON(!req->r_dentry);
		dout(" linking snapped dir %p to dn %p\n", in, req->r_dentry);
		ceph_dir_clear_ordered(dir);
		ihold(in);
		err = splice_dentry(&req->r_dentry, in);
		if (err < 0)
			goto done;
	} else if (rinfo->head->is_dentry && req->r_dentry) {
		/* parent inode is not locked, be careful */
		struct ceph_vino *ptvino = NULL;
		dvino.ino = le64_to_cpu(rinfo->diri.in->ino);
		dvino.snap = le64_to_cpu(rinfo->diri.in->snapid);
		if (rinfo->head->is_target) {
			tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
			tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
			ptvino = &tvino;
		}
		update_dentry_lease_careful(req->r_dentry, rinfo->dlease,
					    session, req->r_request_started,
					    rinfo->dname, rinfo->dname_len,
					    &dvino, ptvino);
	}
done:
	dout("fill_trace done err=%d\n", err);
	return err;
}
/*
 * Prepopulate our cache with readdir results, leases, etc.
 */
static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
					   struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	int i, err = 0;

	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
		struct ceph_vino vino;
		struct inode *in;
		int rc;

		vino.ino = le64_to_cpu(rde->inode.in->ino);
		vino.snap = le64_to_cpu(rde->inode.in->snapid);

		in = ceph_get_inode(req->r_dentry->d_sb, vino);
		if (IS_ERR(in)) {
			err = PTR_ERR(in);
			dout("new_inode badness got %d\n", err);
			continue;
		}
		rc = ceph_fill_inode(in, NULL, &rde->inode, NULL, session,
				     -1, &req->r_caps_reservation);
		if (rc < 0) {
			pr_err("ceph_fill_inode badness on %p got %d\n",
			       in, rc);
			err = rc;
			if (in->i_state & I_NEW) {
				ihold(in);
				discard_new_inode(in);
			}
		} else if (in->i_state & I_NEW) {
			unlock_new_inode(in);
		}

		iput(in);
	}

	return err;
}

void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
{
	if (ctl->page) {
		kunmap(ctl->page);
		put_page(ctl->page);
		ctl->page = NULL;
	}
}
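/*
 * Cache geometry, worked through (assuming 4K pages and 8-byte
 * pointers): each page of the dir's page cache holds nsize ==
 * PAGE_SIZE / sizeof(struct dentry *) == 512 dentry pointers, so a
 * readdir cache index of 1000 maps to slot idx == 488 on page
 * pgoff == 1 in fill_readdir_cache() below.
 */
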
static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
			      struct ceph_readdir_cache_control *ctl,
			      struct ceph_mds_request *req)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	unsigned nsize = PAGE_SIZE / sizeof(struct dentry*);
	unsigned idx = ctl->index % nsize;
	pgoff_t pgoff = ctl->index / nsize;

	if (!ctl->page || pgoff != page_index(ctl->page)) {
		ceph_readdir_cache_release(ctl);
		if (idx == 0)
			ctl->page = grab_cache_page(&dir->i_data, pgoff);
		else
			ctl->page = find_lock_page(&dir->i_data, pgoff);
		if (!ctl->page) {
			ctl->index = -1;
			return idx == 0 ? -ENOMEM : 0;
		}
		/* reading/filling the cache are serialized by
		 * i_mutex, no need to use page lock */
		unlock_page(ctl->page);
		ctl->dentries = kmap(ctl->page);
		if (idx == 0)
			memset(ctl->dentries, 0, PAGE_SIZE);
	}

	if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
	    req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) {
		dout("readdir cache dn %p idx %d\n", dn, ctl->index);
		ctl->dentries[idx] = dn;
		ctl->index++;
	} else {
		dout("disable readdir cache\n");
		ctl->index = -1;
	}
	return 0;
}
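/*
 * In the offsets handled below, note that positions 0 and 1 of a
 * directory fragment are reserved for the "." and ".." entries, which
 * is why fpos_offset (and r_readdir_offset) restart counting at 2.
 */
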
int ceph_readdir_prepopulate(struct ceph_mds_request *req,
			     struct ceph_mds_session *session)
{
	struct dentry *parent = req->r_dentry;
	struct ceph_inode_info *ci = ceph_inode(d_inode(parent));
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct qstr dname;
	struct dentry *dn;
	struct inode *in;
	int err = 0, skipped = 0, ret, i;
	u32 frag = le32_to_cpu(req->r_args.readdir.frag);
	u32 last_hash = 0;
	u32 fpos_offset;
	struct ceph_readdir_cache_control cache_ctl = {};

	if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
		return readdir_prepopulate_inodes_only(req, session);

	if (rinfo->hash_order) {
		if (req->r_path2) {
			last_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
						  req->r_path2,
						  strlen(req->r_path2));
			last_hash = ceph_frag_value(last_hash);
		} else if (rinfo->offset_hash) {
			/* mds understands offset_hash */
			WARN_ON_ONCE(req->r_readdir_offset != 2);
			last_hash = le32_to_cpu(req->r_args.readdir.offset_hash);
		}
	}

	if (rinfo->dir_dir &&
	    le32_to_cpu(rinfo->dir_dir->frag) != frag) {
		dout("readdir_prepopulate got new frag %x -> %x\n",
		     frag, le32_to_cpu(rinfo->dir_dir->frag));
		frag = le32_to_cpu(rinfo->dir_dir->frag);
		if (!rinfo->hash_order)
			req->r_readdir_offset = 2;
	}

	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
		dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
		     rinfo->dir_nr, parent);
	} else {
		dout("readdir_prepopulate %d items under dn %p\n",
		     rinfo->dir_nr, parent);
		if (rinfo->dir_dir)
			ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);

		if (ceph_frag_is_leftmost(frag) &&
		    req->r_readdir_offset == 2 &&
		    !(rinfo->hash_order && last_hash)) {
			/* note dir version at start of readdir so we can
			 * tell if any dentries get dropped */
			req->r_dir_release_cnt =
				atomic64_read(&ci->i_release_count);
			req->r_dir_ordered_cnt =
				atomic64_read(&ci->i_ordered_count);
			req->r_readdir_cache_idx = 0;
		}
	}

	cache_ctl.index = req->r_readdir_cache_idx;
	fpos_offset = req->r_readdir_offset;

	/* FIXME: release caps/leases if error occurs */
	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
		struct ceph_vino tvino;

		dname.name = rde->name;
		dname.len = rde->name_len;
		dname.hash = full_name_hash(parent, dname.name, dname.len);

		tvino.ino = le64_to_cpu(rde->inode.in->ino);
		tvino.snap = le64_to_cpu(rde->inode.in->snapid);

		if (rinfo->hash_order) {
			u32 hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
						 rde->name, rde->name_len);
			hash = ceph_frag_value(hash);
			if (hash != last_hash)
				fpos_offset = 2;
			last_hash = hash;
			rde->offset = ceph_make_fpos(hash, fpos_offset++, true);
		} else {
			rde->offset = ceph_make_fpos(frag, fpos_offset++, false);
		}

retry_lookup:
		dn = d_lookup(parent, &dname);
		dout("d_lookup on parent=%p name=%.*s got %p\n",
		     parent, dname.len, dname.name, dn);

		if (!dn) {
			dn = d_alloc(parent, &dname);
			dout("d_alloc %p '%.*s' = %p\n", parent,
			     dname.len, dname.name, dn);
			if (!dn) {
				dout("d_alloc badness\n");
				err = -ENOMEM;
				goto out;
			}
		} else if (d_really_is_positive(dn) &&
			   (ceph_ino(d_inode(dn)) != tvino.ino ||
			    ceph_snap(d_inode(dn)) != tvino.snap)) {
			struct ceph_dentry_info *di = ceph_dentry(dn);
			dout(" dn %p points to wrong inode %p\n",
			     dn, d_inode(dn));

			spin_lock(&dn->d_lock);
			if (di->offset > 0 &&
			    di->lease_shared_gen ==
			    atomic_read(&ci->i_shared_gen)) {
				__ceph_dir_clear_ordered(ci);
				di->offset = 0;
			}
			spin_unlock(&dn->d_lock);

			d_delete(dn);
			dput(dn);
			goto retry_lookup;
		}

		/* inode */
		if (d_really_is_positive(dn)) {
			in = d_inode(dn);
		} else {
			in = ceph_get_inode(parent->d_sb, tvino);
			if (IS_ERR(in)) {
				dout("new_inode badness\n");
				d_drop(dn);
				dput(dn);
				err = PTR_ERR(in);
				goto out;
			}
		}

		ret = ceph_fill_inode(in, NULL, &rde->inode, NULL, session,
				      -1, &req->r_caps_reservation);
		if (ret < 0) {
			pr_err("ceph_fill_inode badness on %p\n", in);
			if (d_really_is_negative(dn)) {
				if (in->i_state & I_NEW) {
					ihold(in);
					discard_new_inode(in);
				}
				iput(in);
			}
			d_drop(dn);
			err = ret;
			goto next_item;
		}
		if (in->i_state & I_NEW)
			unlock_new_inode(in);

		if (d_really_is_negative(dn)) {
			if (ceph_security_xattr_deadlock(in)) {
				dout(" skip splicing dn %p to inode %p"
				     " (security xattr deadlock)\n", dn, in);
				iput(in);
				skipped++;
				goto next_item;
			}

			err = splice_dentry(&dn, in);
			if (err < 0)
				goto next_item;
		}

		ceph_dentry(dn)->offset = rde->offset;

		update_dentry_lease(d_inode(parent), dn,
				    rde->lease, req->r_session,
				    req->r_request_started);

		if (err == 0 && skipped == 0 && cache_ctl.index >= 0) {
			ret = fill_readdir_cache(d_inode(parent), dn,
						 &cache_ctl, req);
			if (ret < 0)
				err = ret;
		}
next_item:
		dput(dn);
	}
out:
	if (err == 0 && skipped == 0) {
		set_bit(CEPH_MDS_R_DID_PREPOPULATE, &req->r_req_flags);
		req->r_readdir_cache_idx = cache_ctl.index;
	}
	ceph_readdir_cache_release(&cache_ctl);
	dout("readdir_prepopulate done\n");
	return err;
}

bool ceph_inode_set_size(struct inode *inode, loff_t size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	bool ret;

	spin_lock(&ci->i_ceph_lock);
	dout("set_size %p %llu -> %llu\n", inode, i_size_read(inode), size);
	i_size_write(inode, size);
	inode->i_blocks = calc_inode_blocks(size);

	ret = __ceph_should_report_size(ci);

	spin_unlock(&ci->i_ceph_lock);
	return ret;
}
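/*
 * Helpers such as ceph_queue_vmtruncate() used elsewhere in this file
 * are (a sketch; the real inlines live in super.h) thin wrappers
 * around ceph_queue_inode_work() below, e.g.:
 *
 *	static inline void ceph_queue_vmtruncate(struct inode *inode)
 *	{
 *		ceph_queue_inode_work(inode, CEPH_I_WORK_VMTRUNCATE);
 *	}
 */
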
mask=%lx\n", inode, ci->i_work_mask); 1836 } else { 1837 dout("queue_inode_work %p already queued, mask=%lx\n", 1838 inode, ci->i_work_mask); 1839 iput(inode); 1840 } 1841 } 1842 1843 static void ceph_do_invalidate_pages(struct inode *inode) 1844 { 1845 struct ceph_inode_info *ci = ceph_inode(inode); 1846 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 1847 u32 orig_gen; 1848 int check = 0; 1849 1850 mutex_lock(&ci->i_truncate_mutex); 1851 1852 if (READ_ONCE(fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) { 1853 pr_warn_ratelimited("invalidate_pages %p %lld forced umount\n", 1854 inode, ceph_ino(inode)); 1855 mapping_set_error(inode->i_mapping, -EIO); 1856 truncate_pagecache(inode, 0); 1857 mutex_unlock(&ci->i_truncate_mutex); 1858 goto out; 1859 } 1860 1861 spin_lock(&ci->i_ceph_lock); 1862 dout("invalidate_pages %p gen %d revoking %d\n", inode, 1863 ci->i_rdcache_gen, ci->i_rdcache_revoking); 1864 if (ci->i_rdcache_revoking != ci->i_rdcache_gen) { 1865 if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE)) 1866 check = 1; 1867 spin_unlock(&ci->i_ceph_lock); 1868 mutex_unlock(&ci->i_truncate_mutex); 1869 goto out; 1870 } 1871 orig_gen = ci->i_rdcache_gen; 1872 spin_unlock(&ci->i_ceph_lock); 1873 1874 ceph_fscache_invalidate(inode); 1875 if (invalidate_inode_pages2(inode->i_mapping) < 0) { 1876 pr_err("invalidate_pages %p fails\n", inode); 1877 } 1878 1879 spin_lock(&ci->i_ceph_lock); 1880 if (orig_gen == ci->i_rdcache_gen && 1881 orig_gen == ci->i_rdcache_revoking) { 1882 dout("invalidate_pages %p gen %d successful\n", inode, 1883 ci->i_rdcache_gen); 1884 ci->i_rdcache_revoking--; 1885 check = 1; 1886 } else { 1887 dout("invalidate_pages %p gen %d raced, now %d revoking %d\n", 1888 inode, orig_gen, ci->i_rdcache_gen, 1889 ci->i_rdcache_revoking); 1890 if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE)) 1891 check = 1; 1892 } 1893 spin_unlock(&ci->i_ceph_lock); 1894 mutex_unlock(&ci->i_truncate_mutex); 1895 out: 1896 if (check) 1897 ceph_check_caps(ci, 0, NULL); 1898 } 1899 1900 /* 1901 * Make sure any pending truncation is applied before doing anything 1902 * that may depend on it. 1903 */ 1904 void __ceph_do_pending_vmtruncate(struct inode *inode) 1905 { 1906 struct ceph_inode_info *ci = ceph_inode(inode); 1907 u64 to; 1908 int wrbuffer_refs, finish = 0; 1909 1910 mutex_lock(&ci->i_truncate_mutex); 1911 retry: 1912 spin_lock(&ci->i_ceph_lock); 1913 if (ci->i_truncate_pending == 0) { 1914 dout("__do_pending_vmtruncate %p none pending\n", inode); 1915 spin_unlock(&ci->i_ceph_lock); 1916 mutex_unlock(&ci->i_truncate_mutex); 1917 return; 1918 } 1919 1920 /* 1921 * make sure any dirty snapped pages are flushed before we 1922 * possibly truncate them.. so write AND block! 
static void ceph_do_invalidate_pages(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	u32 orig_gen;
	int check = 0;

	mutex_lock(&ci->i_truncate_mutex);

	if (READ_ONCE(fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) {
		pr_warn_ratelimited("invalidate_pages %p %lld forced umount\n",
				    inode, ceph_ino(inode));
		mapping_set_error(inode->i_mapping, -EIO);
		truncate_pagecache(inode, 0);
		mutex_unlock(&ci->i_truncate_mutex);
		goto out;
	}

	spin_lock(&ci->i_ceph_lock);
	dout("invalidate_pages %p gen %d revoking %d\n", inode,
	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
			check = 1;
		spin_unlock(&ci->i_ceph_lock);
		mutex_unlock(&ci->i_truncate_mutex);
		goto out;
	}
	orig_gen = ci->i_rdcache_gen;
	spin_unlock(&ci->i_ceph_lock);

	ceph_fscache_invalidate(inode);
	if (invalidate_inode_pages2(inode->i_mapping) < 0)
		pr_err("invalidate_pages %p fails\n", inode);

	spin_lock(&ci->i_ceph_lock);
	if (orig_gen == ci->i_rdcache_gen &&
	    orig_gen == ci->i_rdcache_revoking) {
		dout("invalidate_pages %p gen %d successful\n", inode,
		     ci->i_rdcache_gen);
		ci->i_rdcache_revoking--;
		check = 1;
	} else {
		dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
		     inode, orig_gen, ci->i_rdcache_gen,
		     ci->i_rdcache_revoking);
		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
			check = 1;
	}
	spin_unlock(&ci->i_ceph_lock);
	mutex_unlock(&ci->i_truncate_mutex);
out:
	if (check)
		ceph_check_caps(ci, 0, NULL);
}

/*
 * Make sure any pending truncation is applied before doing anything
 * that may depend on it.
 */
void __ceph_do_pending_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 to;
	int wrbuffer_refs, finish = 0;

	mutex_lock(&ci->i_truncate_mutex);
retry:
	spin_lock(&ci->i_ceph_lock);
	if (ci->i_truncate_pending == 0) {
		dout("__do_pending_vmtruncate %p none pending\n", inode);
		spin_unlock(&ci->i_ceph_lock);
		mutex_unlock(&ci->i_truncate_mutex);
		return;
	}

	/*
	 * make sure any dirty snapped pages are flushed before we
	 * possibly truncate them... so write AND block!
	 */
	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
		spin_unlock(&ci->i_ceph_lock);
		dout("__do_pending_vmtruncate %p flushing snaps first\n",
		     inode);
		filemap_write_and_wait_range(&inode->i_data, 0,
					     inode->i_sb->s_maxbytes);
		goto retry;
	}

	/* there should be no reader or writer */
	WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);

	to = ci->i_truncate_size;
	wrbuffer_refs = ci->i_wrbuffer_ref;
	dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
	     ci->i_truncate_pending, to);
	spin_unlock(&ci->i_ceph_lock);

	truncate_pagecache(inode, to);

	spin_lock(&ci->i_ceph_lock);
	if (to == ci->i_truncate_size) {
		ci->i_truncate_pending = 0;
		finish = 1;
	}
	spin_unlock(&ci->i_ceph_lock);
	if (!finish)
		goto retry;

	mutex_unlock(&ci->i_truncate_mutex);

	if (wrbuffer_refs == 0)
		ceph_check_caps(ci, 0, NULL);

	wake_up_all(&ci->i_cap_wq);
}
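/*
 * Workqueue handler for deferred inode operations.  Runs every kind of
 * work whose bit is set in i_work_mask, then drops the inode reference
 * taken in ceph_queue_inode_work().
 */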
static void ceph_inode_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_work);
	struct inode *inode = &ci->vfs_inode;

	if (test_and_clear_bit(CEPH_I_WORK_WRITEBACK, &ci->i_work_mask)) {
		dout("writeback %p\n", inode);
		filemap_fdatawrite(&inode->i_data);
	}
	if (test_and_clear_bit(CEPH_I_WORK_INVALIDATE_PAGES, &ci->i_work_mask))
		ceph_do_invalidate_pages(inode);

	if (test_and_clear_bit(CEPH_I_WORK_VMTRUNCATE, &ci->i_work_mask))
		__ceph_do_pending_vmtruncate(inode);

	if (test_and_clear_bit(CEPH_I_WORK_CHECK_CAPS, &ci->i_work_mask))
		ceph_check_caps(ci, 0, NULL);

	if (test_and_clear_bit(CEPH_I_WORK_FLUSH_SNAPS, &ci->i_work_mask))
		ceph_flush_snaps(ci, NULL);

	iput(inode);
}

/*
 * symlinks
 */
static const struct inode_operations ceph_symlink_iops = {
	.get_link = simple_get_link,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.listxattr = ceph_listxattr,
};
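/*
 * Apply the requested attribute changes.  Fields covered by caps we
 * hold exclusively are updated locally and the corresponding caps are
 * marked dirty; anything else is packed into a SETATTR request for the
 * auth MDS, and the relevant SHARED caps are released back to the MDS
 * via r_inode_drop.
 */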
int __ceph_setattr(struct inode *inode, struct iattr *attr)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	unsigned int ia_valid = attr->ia_valid;
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_cap_flush *prealloc_cf;
	int issued;
	int release = 0, dirtied = 0;
	int mask = 0;
	int err = 0;
	int inode_dirty_flags = 0;
	bool lock_snap_rwsem = false;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req)) {
		ceph_free_cap_flush(prealloc_cf);
		return PTR_ERR(req);
	}

	spin_lock(&ci->i_ceph_lock);
	issued = __ceph_caps_issued(ci, NULL);

	if (!ci->i_head_snapc &&
	    (issued & (CEPH_CAP_ANY_EXCL | CEPH_CAP_FILE_WR))) {
		lock_snap_rwsem = true;
		if (!down_read_trylock(&mdsc->snap_rwsem)) {
			spin_unlock(&ci->i_ceph_lock);
			down_read(&mdsc->snap_rwsem);
			spin_lock(&ci->i_ceph_lock);
			issued = __ceph_caps_issued(ci, NULL);
		}
	}

	dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));

	if (ia_valid & ATTR_UID) {
		dout("setattr %p uid %d -> %d\n", inode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kuid(&init_user_ns, attr->ia_uid));
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_uid = attr->ia_uid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   !uid_eq(attr->ia_uid, inode->i_uid)) {
			req->r_args.setattr.uid = cpu_to_le32(
				from_kuid(&init_user_ns, attr->ia_uid));
			mask |= CEPH_SETATTR_UID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_GID) {
		dout("setattr %p gid %d -> %d\n", inode,
		     from_kgid(&init_user_ns, inode->i_gid),
		     from_kgid(&init_user_ns, attr->ia_gid));
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_gid = attr->ia_gid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   !gid_eq(attr->ia_gid, inode->i_gid)) {
			req->r_args.setattr.gid = cpu_to_le32(
				from_kgid(&init_user_ns, attr->ia_gid));
			mask |= CEPH_SETATTR_GID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_MODE) {
		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
		     attr->ia_mode);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_mode = attr->ia_mode;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_mode != inode->i_mode) {
			inode->i_mode = attr->ia_mode;
			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
			mask |= CEPH_SETATTR_MODE;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}

	if (ia_valid & ATTR_ATIME) {
		dout("setattr %p atime %lld.%ld -> %lld.%ld\n", inode,
		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec64_compare(&inode->i_atime,
					      &attr->ia_atime) < 0) {
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec64_equal(&inode->i_atime, &attr->ia_atime)) {
			ceph_encode_timespec64(&req->r_args.setattr.atime,
					       &attr->ia_atime);
			mask |= CEPH_SETATTR_ATIME;
			release |= CEPH_CAP_FILE_SHARED |
				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_SIZE) {
		loff_t isize = i_size_read(inode);

		dout("setattr %p size %lld -> %lld\n", inode, isize, attr->ia_size);
		if ((issued & CEPH_CAP_FILE_EXCL) && attr->ia_size > isize) {
			i_size_write(inode, attr->ia_size);
			inode->i_blocks = calc_inode_blocks(attr->ia_size);
			ci->i_reported_size = attr->ia_size;
			dirtied |= CEPH_CAP_FILE_EXCL;
			ia_valid |= ATTR_MTIME;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   attr->ia_size != isize) {
			req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
			req->r_args.setattr.old_size = cpu_to_le64(isize);
			mask |= CEPH_SETATTR_SIZE;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL |
				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_MTIME) {
		dout("setattr %p mtime %lld.%ld -> %lld.%ld\n", inode,
		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec64_compare(&inode->i_mtime,
					      &attr->ia_mtime) < 0) {
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec64_equal(&inode->i_mtime, &attr->ia_mtime)) {
			ceph_encode_timespec64(&req->r_args.setattr.mtime,
					       &attr->ia_mtime);
			mask |= CEPH_SETATTR_MTIME;
			release |= CEPH_CAP_FILE_SHARED |
				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
		}
	}

	/* these do nothing */
	if (ia_valid & ATTR_CTIME) {
		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
		dout("setattr %p ctime %lld.%ld -> %lld.%ld (%s)\n", inode,
		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
		     only ? "ctime only" : "ignored");
		if (only) {
			/*
			 * if kernel wants to dirty ctime but nothing else,
			 * we need to choose a cap to dirty under, or do
			 * an almost-no-op setattr
			 */
			if (issued & CEPH_CAP_AUTH_EXCL)
				dirtied |= CEPH_CAP_AUTH_EXCL;
			else if (issued & CEPH_CAP_FILE_EXCL)
				dirtied |= CEPH_CAP_FILE_EXCL;
			else if (issued & CEPH_CAP_XATTR_EXCL)
				dirtied |= CEPH_CAP_XATTR_EXCL;
			else
				mask |= CEPH_SETATTR_CTIME;
		}
	}
	if (ia_valid & ATTR_FILE)
		dout("setattr %p ATTR_FILE ... hrm!\n", inode);

	if (dirtied) {
		inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
							   &prealloc_cf);
		inode->i_ctime = attr->ia_ctime;
	}

	release &= issued;
	spin_unlock(&ci->i_ceph_lock);
	if (lock_snap_rwsem)
		up_read(&mdsc->snap_rwsem);

	if (inode_dirty_flags)
		__mark_inode_dirty(inode, inode_dirty_flags);

	if (mask) {
		req->r_inode = inode;
		ihold(inode);
		req->r_inode_drop = release;
		req->r_args.setattr.mask = cpu_to_le32(mask);
		req->r_num_caps = 1;
		req->r_stamp = attr->ia_ctime;
		err = ceph_mdsc_do_request(mdsc, NULL, req);
	}
	dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
	     ceph_cap_string(dirtied), mask);

	ceph_mdsc_put_request(req);
	ceph_free_cap_flush(prealloc_cf);

	if (err >= 0 && (mask & CEPH_SETATTR_SIZE))
		__ceph_do_pending_vmtruncate(inode);

	return err;
}

/*
 * setattr
 */
int ceph_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
		 struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	int err;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	err = setattr_prepare(&init_user_ns, dentry, attr);
	if (err != 0)
		return err;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size > max(i_size_read(inode), fsc->max_file_size))
		return -EFBIG;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    ceph_quota_is_max_bytes_exceeded(inode, attr->ia_size))
		return -EDQUOT;

	err = __ceph_setattr(inode, attr);

	if (err >= 0 && (attr->ia_valid & ATTR_MODE))
		err = posix_acl_chmod(&init_user_ns, inode, attr->ia_mode);

	return err;
}

/*
 * Verify that we have a lease on the given mask.  If not,
 * do a getattr against an mds.
 */
int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
		      int mask, bool force)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int mode;
	int err;

	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		dout("do_getattr inode %p SNAPDIR\n", inode);
		return 0;
	}

	dout("do_getattr inode %p mask %s mode 0%o\n",
	     inode, ceph_cap_string(mask), inode->i_mode);
	if (!force && ceph_caps_issued_mask_metric(ceph_inode(inode), mask, 1))
		return 0;

	mode = (mask & CEPH_STAT_RSTAT) ? USE_AUTH_MDS : USE_ANY_MDS;
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, mode);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	req->r_args.getattr.mask = cpu_to_le32(mask);
	req->r_locked_page = locked_page;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
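	/*
	 * A non-NULL locked_page means the caller expects inline file
	 * data in the reply (it is copied into that page as the inode
	 * is filled): inline_version == 0 means the reply unexpectedly
	 * lacked it, CEPH_INLINE_NONE means the file holds no inline
	 * data, and otherwise err becomes the inline length.
	 */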
	if (locked_page && err == 0) {
		u64 inline_version = req->r_reply_info.targeti.inline_version;

		if (inline_version == 0) {
			/* the reply is supposed to contain inline data */
			err = -EINVAL;
		} else if (inline_version == CEPH_INLINE_NONE) {
			err = -ENODATA;
		} else {
			err = req->r_reply_info.targeti.inline_len;
		}
	}
	ceph_mdsc_put_request(req);
	dout("do_getattr result=%d\n", err);
	return err;
}

/*
 * Check inode permissions.  We verify we have a valid value for
 * the AUTH cap, then call the generic handler.
 */
int ceph_permission(struct user_namespace *mnt_userns, struct inode *inode,
		    int mask)
{
	int err;

	if (mask & MAY_NOT_BLOCK)
		return -ECHILD;

	err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false);

	if (!err)
		err = generic_permission(&init_user_ns, inode, mask);
	return err;
}

/* Craft a mask of needed caps given a set of requested statx attrs. */
static int statx_to_caps(u32 want, umode_t mode)
{
	int mask = 0;

	if (want & (STATX_MODE|STATX_UID|STATX_GID|STATX_CTIME|STATX_BTIME))
		mask |= CEPH_CAP_AUTH_SHARED;

	if (want & (STATX_NLINK|STATX_CTIME)) {
		/*
		 * The link count for directories depends on inode->i_subdirs,
		 * and that is only updated when Fs caps are held.
		 */
		if (S_ISDIR(mode))
			mask |= CEPH_CAP_FILE_SHARED;
		else
			mask |= CEPH_CAP_LINK_SHARED;
	}

	if (want & (STATX_ATIME|STATX_MTIME|STATX_CTIME|STATX_SIZE|
		    STATX_BLOCKS))
		mask |= CEPH_CAP_FILE_SHARED;

	if (want & (STATX_CTIME))
		mask |= CEPH_CAP_XATTR_SHARED;

	return mask;
}

/*
 * Get all the attributes.  If we have sufficient caps for the requested
 * attrs, then we can avoid talking to the MDS at all.
 */
int ceph_getattr(struct user_namespace *mnt_userns, const struct path *path,
		 struct kstat *stat, u32 request_mask, unsigned int flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u32 valid_mask = STATX_BASIC_STATS;
	int err = 0;

	/* Skip the getattr altogether if we're asked not to sync */
	if (!(flags & AT_STATX_DONT_SYNC)) {
		err = ceph_do_getattr(inode,
				      statx_to_caps(request_mask, inode->i_mode),
				      flags & AT_STATX_FORCE_SYNC);
		if (err)
			return err;
	}

	generic_fillattr(&init_user_ns, inode, stat);
	stat->ino = ceph_present_inode(inode);

	/*
	 * btime on newly-allocated inodes is 0, so if this is still set to
	 * that, then assume that it's not valid.
	 */
	if (ci->i_btime.tv_sec || ci->i_btime.tv_nsec) {
		stat->btime = ci->i_btime;
		valid_mask |= STATX_BTIME;
	}

	if (ceph_snap(inode) == CEPH_NOSNAP)
		stat->dev = inode->i_sb->s_dev;
	else
		stat->dev = ci->i_snapid_map ? ci->i_snapid_map->dev : 0;
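	/*
	 * For directories, report the MDS's recursive statistics rather
	 * than local block counts: size is either the recursive byte
	 * total (with the rbytes mount option) or the entry count, and
	 * blocks is meaningless, so report zero.
	 */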
	if (S_ISDIR(inode->i_mode)) {
		if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
					RBYTES))
			stat->size = ci->i_rbytes;
		else
			stat->size = ci->i_files + ci->i_subdirs;
		stat->blocks = 0;
		stat->blksize = 65536;
		/*
		 * Some applications rely on the st_nlink value of
		 * directories to be either 0 (if unlinked)
		 * or 2 + number of subdirectories.
		 */
		if (stat->nlink == 1)
			/* '.' + '..' + subdirs */
			stat->nlink = 1 + 1 + ci->i_subdirs;
	}

	stat->result_mask = request_mask & valid_mask;
	return err;
}