1 #include <linux/ceph/ceph_debug.h> 2 3 #include <linux/module.h> 4 #include <linux/fs.h> 5 #include <linux/slab.h> 6 #include <linux/string.h> 7 #include <linux/uaccess.h> 8 #include <linux/kernel.h> 9 #include <linux/writeback.h> 10 #include <linux/vmalloc.h> 11 #include <linux/xattr.h> 12 #include <linux/posix_acl.h> 13 #include <linux/random.h> 14 #include <linux/sort.h> 15 16 #include "super.h" 17 #include "mds_client.h" 18 #include "cache.h" 19 #include <linux/ceph/decode.h> 20 21 /* 22 * Ceph inode operations 23 * 24 * Implement basic inode helpers (get, alloc) and inode ops (getattr, 25 * setattr, etc.), xattr helpers, and helpers for assimilating 26 * metadata returned by the MDS into our cache. 27 * 28 * Also define helpers for doing asynchronous writeback, invalidation, 29 * and truncation for the benefit of those who can't afford to block 30 * (typically because they are in the message handler path). 31 */ 32 33 static const struct inode_operations ceph_symlink_iops; 34 35 static void ceph_invalidate_work(struct work_struct *work); 36 static void ceph_writeback_work(struct work_struct *work); 37 static void ceph_vmtruncate_work(struct work_struct *work); 38 39 /* 40 * find or create an inode, given the ceph ino number 41 */ 42 static int ceph_set_ino_cb(struct inode *inode, void *data) 43 { 44 ceph_inode(inode)->i_vino = *(struct ceph_vino *)data; 45 inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data); 46 return 0; 47 } 48 49 struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino) 50 { 51 struct inode *inode; 52 ino_t t = ceph_vino_to_ino(vino); 53 54 inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino); 55 if (inode == NULL) 56 return ERR_PTR(-ENOMEM); 57 if (inode->i_state & I_NEW) { 58 dout("get_inode created new inode %p %llx.%llx ino %llx\n", 59 inode, ceph_vinop(inode), (u64)inode->i_ino); 60 unlock_new_inode(inode); 61 } 62 63 dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino, 64 vino.snap, inode); 65 return inode; 66 } 67 68 /* 69 * get/constuct snapdir inode for a given directory 70 */ 71 struct inode *ceph_get_snapdir(struct inode *parent) 72 { 73 struct ceph_vino vino = { 74 .ino = ceph_ino(parent), 75 .snap = CEPH_SNAPDIR, 76 }; 77 struct inode *inode = ceph_get_inode(parent->i_sb, vino); 78 struct ceph_inode_info *ci = ceph_inode(inode); 79 80 BUG_ON(!S_ISDIR(parent->i_mode)); 81 if (IS_ERR(inode)) 82 return inode; 83 inode->i_mode = parent->i_mode; 84 inode->i_uid = parent->i_uid; 85 inode->i_gid = parent->i_gid; 86 inode->i_op = &ceph_snapdir_iops; 87 inode->i_fop = &ceph_snapdir_fops; 88 ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */ 89 ci->i_rbytes = 0; 90 return inode; 91 } 92 93 const struct inode_operations ceph_file_iops = { 94 .permission = ceph_permission, 95 .setattr = ceph_setattr, 96 .getattr = ceph_getattr, 97 .listxattr = ceph_listxattr, 98 .get_acl = ceph_get_acl, 99 .set_acl = ceph_set_acl, 100 }; 101 102 103 /* 104 * We use a 'frag tree' to keep track of the MDS's directory fragments 105 * for a given inode (usually there is just a single fragment). We 106 * need to know when a child frag is delegated to a new MDS, or when 107 * it is flagged as replicated, so we can direct our requests 108 * accordingly. 109 */ 110 111 /* 112 * find/create a frag in the tree 113 */ 114 static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci, 115 u32 f) 116 { 117 struct rb_node **p; 118 struct rb_node *parent = NULL; 119 struct ceph_inode_frag *frag; 120 int c; 121 122 p = &ci->i_fragtree.rb_node; 123 while (*p) { 124 parent = *p; 125 frag = rb_entry(parent, struct ceph_inode_frag, node); 126 c = ceph_frag_compare(f, frag->frag); 127 if (c < 0) 128 p = &(*p)->rb_left; 129 else if (c > 0) 130 p = &(*p)->rb_right; 131 else 132 return frag; 133 } 134 135 frag = kmalloc(sizeof(*frag), GFP_NOFS); 136 if (!frag) { 137 pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx " 138 "frag %x\n", &ci->vfs_inode, 139 ceph_vinop(&ci->vfs_inode), f); 140 return ERR_PTR(-ENOMEM); 141 } 142 frag->frag = f; 143 frag->split_by = 0; 144 frag->mds = -1; 145 frag->ndist = 0; 146 147 rb_link_node(&frag->node, parent, p); 148 rb_insert_color(&frag->node, &ci->i_fragtree); 149 150 dout("get_or_create_frag added %llx.%llx frag %x\n", 151 ceph_vinop(&ci->vfs_inode), f); 152 return frag; 153 } 154 155 /* 156 * find a specific frag @f 157 */ 158 struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f) 159 { 160 struct rb_node *n = ci->i_fragtree.rb_node; 161 162 while (n) { 163 struct ceph_inode_frag *frag = 164 rb_entry(n, struct ceph_inode_frag, node); 165 int c = ceph_frag_compare(f, frag->frag); 166 if (c < 0) 167 n = n->rb_left; 168 else if (c > 0) 169 n = n->rb_right; 170 else 171 return frag; 172 } 173 return NULL; 174 } 175 176 /* 177 * Choose frag containing the given value @v. If @pfrag is 178 * specified, copy the frag delegation info to the caller if 179 * it is present. 180 */ 181 static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v, 182 struct ceph_inode_frag *pfrag, int *found) 183 { 184 u32 t = ceph_frag_make(0, 0); 185 struct ceph_inode_frag *frag; 186 unsigned nway, i; 187 u32 n; 188 189 if (found) 190 *found = 0; 191 192 while (1) { 193 WARN_ON(!ceph_frag_contains_value(t, v)); 194 frag = __ceph_find_frag(ci, t); 195 if (!frag) 196 break; /* t is a leaf */ 197 if (frag->split_by == 0) { 198 if (pfrag) 199 memcpy(pfrag, frag, sizeof(*pfrag)); 200 if (found) 201 *found = 1; 202 break; 203 } 204 205 /* choose child */ 206 nway = 1 << frag->split_by; 207 dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t, 208 frag->split_by, nway); 209 for (i = 0; i < nway; i++) { 210 n = ceph_frag_make_child(t, frag->split_by, i); 211 if (ceph_frag_contains_value(n, v)) { 212 t = n; 213 break; 214 } 215 } 216 BUG_ON(i == nway); 217 } 218 dout("choose_frag(%x) = %x\n", v, t); 219 220 return t; 221 } 222 223 u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v, 224 struct ceph_inode_frag *pfrag, int *found) 225 { 226 u32 ret; 227 mutex_lock(&ci->i_fragtree_mutex); 228 ret = __ceph_choose_frag(ci, v, pfrag, found); 229 mutex_unlock(&ci->i_fragtree_mutex); 230 return ret; 231 } 232 233 /* 234 * Process dirfrag (delegation) info from the mds. Include leaf 235 * fragment in tree ONLY if ndist > 0. Otherwise, only 236 * branches/splits are included in i_fragtree) 237 */ 238 static int ceph_fill_dirfrag(struct inode *inode, 239 struct ceph_mds_reply_dirfrag *dirinfo) 240 { 241 struct ceph_inode_info *ci = ceph_inode(inode); 242 struct ceph_inode_frag *frag; 243 u32 id = le32_to_cpu(dirinfo->frag); 244 int mds = le32_to_cpu(dirinfo->auth); 245 int ndist = le32_to_cpu(dirinfo->ndist); 246 int diri_auth = -1; 247 int i; 248 int err = 0; 249 250 spin_lock(&ci->i_ceph_lock); 251 if (ci->i_auth_cap) 252 diri_auth = ci->i_auth_cap->mds; 253 spin_unlock(&ci->i_ceph_lock); 254 255 if (mds == -1) /* CDIR_AUTH_PARENT */ 256 mds = diri_auth; 257 258 mutex_lock(&ci->i_fragtree_mutex); 259 if (ndist == 0 && mds == diri_auth) { 260 /* no delegation info needed. */ 261 frag = __ceph_find_frag(ci, id); 262 if (!frag) 263 goto out; 264 if (frag->split_by == 0) { 265 /* tree leaf, remove */ 266 dout("fill_dirfrag removed %llx.%llx frag %x" 267 " (no ref)\n", ceph_vinop(inode), id); 268 rb_erase(&frag->node, &ci->i_fragtree); 269 kfree(frag); 270 } else { 271 /* tree branch, keep and clear */ 272 dout("fill_dirfrag cleared %llx.%llx frag %x" 273 " referral\n", ceph_vinop(inode), id); 274 frag->mds = -1; 275 frag->ndist = 0; 276 } 277 goto out; 278 } 279 280 281 /* find/add this frag to store mds delegation info */ 282 frag = __get_or_create_frag(ci, id); 283 if (IS_ERR(frag)) { 284 /* this is not the end of the world; we can continue 285 with bad/inaccurate delegation info */ 286 pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n", 287 ceph_vinop(inode), le32_to_cpu(dirinfo->frag)); 288 err = -ENOMEM; 289 goto out; 290 } 291 292 frag->mds = mds; 293 frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP); 294 for (i = 0; i < frag->ndist; i++) 295 frag->dist[i] = le32_to_cpu(dirinfo->dist[i]); 296 dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n", 297 ceph_vinop(inode), frag->frag, frag->ndist); 298 299 out: 300 mutex_unlock(&ci->i_fragtree_mutex); 301 return err; 302 } 303 304 static int frag_tree_split_cmp(const void *l, const void *r) 305 { 306 struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l; 307 struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r; 308 return ceph_frag_compare(ls->frag, rs->frag); 309 } 310 311 static bool is_frag_child(u32 f, struct ceph_inode_frag *frag) 312 { 313 if (!frag) 314 return f == ceph_frag_make(0, 0); 315 if (ceph_frag_bits(f) != ceph_frag_bits(frag->frag) + frag->split_by) 316 return false; 317 return ceph_frag_contains_value(frag->frag, ceph_frag_value(f)); 318 } 319 320 static int ceph_fill_fragtree(struct inode *inode, 321 struct ceph_frag_tree_head *fragtree, 322 struct ceph_mds_reply_dirfrag *dirinfo) 323 { 324 struct ceph_inode_info *ci = ceph_inode(inode); 325 struct ceph_inode_frag *frag, *prev_frag = NULL; 326 struct rb_node *rb_node; 327 unsigned i, split_by, nsplits; 328 u32 id; 329 bool update = false; 330 331 mutex_lock(&ci->i_fragtree_mutex); 332 nsplits = le32_to_cpu(fragtree->nsplits); 333 if (nsplits != ci->i_fragtree_nsplits) { 334 update = true; 335 } else if (nsplits) { 336 i = prandom_u32() % nsplits; 337 id = le32_to_cpu(fragtree->splits[i].frag); 338 if (!__ceph_find_frag(ci, id)) 339 update = true; 340 } else if (!RB_EMPTY_ROOT(&ci->i_fragtree)) { 341 rb_node = rb_first(&ci->i_fragtree); 342 frag = rb_entry(rb_node, struct ceph_inode_frag, node); 343 if (frag->frag != ceph_frag_make(0, 0) || rb_next(rb_node)) 344 update = true; 345 } 346 if (!update && dirinfo) { 347 id = le32_to_cpu(dirinfo->frag); 348 if (id != __ceph_choose_frag(ci, id, NULL, NULL)) 349 update = true; 350 } 351 if (!update) 352 goto out_unlock; 353 354 if (nsplits > 1) { 355 sort(fragtree->splits, nsplits, sizeof(fragtree->splits[0]), 356 frag_tree_split_cmp, NULL); 357 } 358 359 dout("fill_fragtree %llx.%llx\n", ceph_vinop(inode)); 360 rb_node = rb_first(&ci->i_fragtree); 361 for (i = 0; i < nsplits; i++) { 362 id = le32_to_cpu(fragtree->splits[i].frag); 363 split_by = le32_to_cpu(fragtree->splits[i].by); 364 if (split_by == 0 || ceph_frag_bits(id) + split_by > 24) { 365 pr_err("fill_fragtree %llx.%llx invalid split %d/%u, " 366 "frag %x split by %d\n", ceph_vinop(inode), 367 i, nsplits, id, split_by); 368 continue; 369 } 370 frag = NULL; 371 while (rb_node) { 372 frag = rb_entry(rb_node, struct ceph_inode_frag, node); 373 if (ceph_frag_compare(frag->frag, id) >= 0) { 374 if (frag->frag != id) 375 frag = NULL; 376 else 377 rb_node = rb_next(rb_node); 378 break; 379 } 380 rb_node = rb_next(rb_node); 381 /* delete stale split/leaf node */ 382 if (frag->split_by > 0 || 383 !is_frag_child(frag->frag, prev_frag)) { 384 rb_erase(&frag->node, &ci->i_fragtree); 385 if (frag->split_by > 0) 386 ci->i_fragtree_nsplits--; 387 kfree(frag); 388 } 389 frag = NULL; 390 } 391 if (!frag) { 392 frag = __get_or_create_frag(ci, id); 393 if (IS_ERR(frag)) 394 continue; 395 } 396 if (frag->split_by == 0) 397 ci->i_fragtree_nsplits++; 398 frag->split_by = split_by; 399 dout(" frag %x split by %d\n", frag->frag, frag->split_by); 400 prev_frag = frag; 401 } 402 while (rb_node) { 403 frag = rb_entry(rb_node, struct ceph_inode_frag, node); 404 rb_node = rb_next(rb_node); 405 /* delete stale split/leaf node */ 406 if (frag->split_by > 0 || 407 !is_frag_child(frag->frag, prev_frag)) { 408 rb_erase(&frag->node, &ci->i_fragtree); 409 if (frag->split_by > 0) 410 ci->i_fragtree_nsplits--; 411 kfree(frag); 412 } 413 } 414 out_unlock: 415 mutex_unlock(&ci->i_fragtree_mutex); 416 return 0; 417 } 418 419 /* 420 * initialize a newly allocated inode. 421 */ 422 struct inode *ceph_alloc_inode(struct super_block *sb) 423 { 424 struct ceph_inode_info *ci; 425 int i; 426 427 ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS); 428 if (!ci) 429 return NULL; 430 431 dout("alloc_inode %p\n", &ci->vfs_inode); 432 433 spin_lock_init(&ci->i_ceph_lock); 434 435 ci->i_version = 0; 436 ci->i_inline_version = 0; 437 ci->i_time_warp_seq = 0; 438 ci->i_ceph_flags = 0; 439 atomic64_set(&ci->i_ordered_count, 1); 440 atomic64_set(&ci->i_release_count, 1); 441 atomic64_set(&ci->i_complete_seq[0], 0); 442 atomic64_set(&ci->i_complete_seq[1], 0); 443 ci->i_symlink = NULL; 444 445 memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout)); 446 RCU_INIT_POINTER(ci->i_layout.pool_ns, NULL); 447 448 ci->i_fragtree = RB_ROOT; 449 mutex_init(&ci->i_fragtree_mutex); 450 451 ci->i_xattrs.blob = NULL; 452 ci->i_xattrs.prealloc_blob = NULL; 453 ci->i_xattrs.dirty = false; 454 ci->i_xattrs.index = RB_ROOT; 455 ci->i_xattrs.count = 0; 456 ci->i_xattrs.names_size = 0; 457 ci->i_xattrs.vals_size = 0; 458 ci->i_xattrs.version = 0; 459 ci->i_xattrs.index_version = 0; 460 461 ci->i_caps = RB_ROOT; 462 ci->i_auth_cap = NULL; 463 ci->i_dirty_caps = 0; 464 ci->i_flushing_caps = 0; 465 INIT_LIST_HEAD(&ci->i_dirty_item); 466 INIT_LIST_HEAD(&ci->i_flushing_item); 467 ci->i_prealloc_cap_flush = NULL; 468 INIT_LIST_HEAD(&ci->i_cap_flush_list); 469 init_waitqueue_head(&ci->i_cap_wq); 470 ci->i_hold_caps_min = 0; 471 ci->i_hold_caps_max = 0; 472 INIT_LIST_HEAD(&ci->i_cap_delay_list); 473 INIT_LIST_HEAD(&ci->i_cap_snaps); 474 ci->i_head_snapc = NULL; 475 ci->i_snap_caps = 0; 476 477 for (i = 0; i < CEPH_FILE_MODE_BITS; i++) 478 ci->i_nr_by_mode[i] = 0; 479 480 mutex_init(&ci->i_truncate_mutex); 481 ci->i_truncate_seq = 0; 482 ci->i_truncate_size = 0; 483 ci->i_truncate_pending = 0; 484 485 ci->i_max_size = 0; 486 ci->i_reported_size = 0; 487 ci->i_wanted_max_size = 0; 488 ci->i_requested_max_size = 0; 489 490 ci->i_pin_ref = 0; 491 ci->i_rd_ref = 0; 492 ci->i_rdcache_ref = 0; 493 ci->i_wr_ref = 0; 494 ci->i_wb_ref = 0; 495 ci->i_wrbuffer_ref = 0; 496 ci->i_wrbuffer_ref_head = 0; 497 ci->i_shared_gen = 0; 498 ci->i_rdcache_gen = 0; 499 ci->i_rdcache_revoking = 0; 500 501 INIT_LIST_HEAD(&ci->i_unsafe_writes); 502 INIT_LIST_HEAD(&ci->i_unsafe_dirops); 503 INIT_LIST_HEAD(&ci->i_unsafe_iops); 504 spin_lock_init(&ci->i_unsafe_lock); 505 506 ci->i_snap_realm = NULL; 507 INIT_LIST_HEAD(&ci->i_snap_realm_item); 508 INIT_LIST_HEAD(&ci->i_snap_flush_item); 509 510 INIT_WORK(&ci->i_wb_work, ceph_writeback_work); 511 INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work); 512 513 INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work); 514 515 ceph_fscache_inode_init(ci); 516 517 return &ci->vfs_inode; 518 } 519 520 static void ceph_i_callback(struct rcu_head *head) 521 { 522 struct inode *inode = container_of(head, struct inode, i_rcu); 523 struct ceph_inode_info *ci = ceph_inode(inode); 524 525 kmem_cache_free(ceph_inode_cachep, ci); 526 } 527 528 void ceph_destroy_inode(struct inode *inode) 529 { 530 struct ceph_inode_info *ci = ceph_inode(inode); 531 struct ceph_inode_frag *frag; 532 struct rb_node *n; 533 534 dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode)); 535 536 ceph_fscache_unregister_inode_cookie(ci); 537 538 ceph_queue_caps_release(inode); 539 540 /* 541 * we may still have a snap_realm reference if there are stray 542 * caps in i_snap_caps. 543 */ 544 if (ci->i_snap_realm) { 545 struct ceph_mds_client *mdsc = 546 ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc; 547 struct ceph_snap_realm *realm = ci->i_snap_realm; 548 549 dout(" dropping residual ref to snap realm %p\n", realm); 550 spin_lock(&realm->inodes_with_caps_lock); 551 list_del_init(&ci->i_snap_realm_item); 552 spin_unlock(&realm->inodes_with_caps_lock); 553 ceph_put_snap_realm(mdsc, realm); 554 } 555 556 kfree(ci->i_symlink); 557 while ((n = rb_first(&ci->i_fragtree)) != NULL) { 558 frag = rb_entry(n, struct ceph_inode_frag, node); 559 rb_erase(n, &ci->i_fragtree); 560 kfree(frag); 561 } 562 ci->i_fragtree_nsplits = 0; 563 564 __ceph_destroy_xattrs(ci); 565 if (ci->i_xattrs.blob) 566 ceph_buffer_put(ci->i_xattrs.blob); 567 if (ci->i_xattrs.prealloc_blob) 568 ceph_buffer_put(ci->i_xattrs.prealloc_blob); 569 570 ceph_put_string(rcu_dereference_raw(ci->i_layout.pool_ns)); 571 572 call_rcu(&inode->i_rcu, ceph_i_callback); 573 } 574 575 int ceph_drop_inode(struct inode *inode) 576 { 577 /* 578 * Positve dentry and corresponding inode are always accompanied 579 * in MDS reply. So no need to keep inode in the cache after 580 * dropping all its aliases. 581 */ 582 return 1; 583 } 584 585 void ceph_evict_inode(struct inode *inode) 586 { 587 /* wait unsafe sync writes */ 588 ceph_sync_write_wait(inode); 589 truncate_inode_pages_final(&inode->i_data); 590 clear_inode(inode); 591 } 592 593 static inline blkcnt_t calc_inode_blocks(u64 size) 594 { 595 return (size + (1<<9) - 1) >> 9; 596 } 597 598 /* 599 * Helpers to fill in size, ctime, mtime, and atime. We have to be 600 * careful because either the client or MDS may have more up to date 601 * info, depending on which capabilities are held, and whether 602 * time_warp_seq or truncate_seq have increased. (Ordinarily, mtime 603 * and size are monotonically increasing, except when utimes() or 604 * truncate() increments the corresponding _seq values.) 605 */ 606 int ceph_fill_file_size(struct inode *inode, int issued, 607 u32 truncate_seq, u64 truncate_size, u64 size) 608 { 609 struct ceph_inode_info *ci = ceph_inode(inode); 610 int queue_trunc = 0; 611 612 if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 || 613 (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) { 614 dout("size %lld -> %llu\n", inode->i_size, size); 615 if (size > 0 && S_ISDIR(inode->i_mode)) { 616 pr_err("fill_file_size non-zero size for directory\n"); 617 size = 0; 618 } 619 i_size_write(inode, size); 620 inode->i_blocks = calc_inode_blocks(size); 621 ci->i_reported_size = size; 622 if (truncate_seq != ci->i_truncate_seq) { 623 dout("truncate_seq %u -> %u\n", 624 ci->i_truncate_seq, truncate_seq); 625 ci->i_truncate_seq = truncate_seq; 626 627 /* the MDS should have revoked these caps */ 628 WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL | 629 CEPH_CAP_FILE_RD | 630 CEPH_CAP_FILE_WR | 631 CEPH_CAP_FILE_LAZYIO)); 632 /* 633 * If we hold relevant caps, or in the case where we're 634 * not the only client referencing this file and we 635 * don't hold those caps, then we need to check whether 636 * the file is either opened or mmaped 637 */ 638 if ((issued & (CEPH_CAP_FILE_CACHE| 639 CEPH_CAP_FILE_BUFFER)) || 640 mapping_mapped(inode->i_mapping) || 641 __ceph_caps_file_wanted(ci)) { 642 ci->i_truncate_pending++; 643 queue_trunc = 1; 644 } 645 } 646 } 647 if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 && 648 ci->i_truncate_size != truncate_size) { 649 dout("truncate_size %lld -> %llu\n", ci->i_truncate_size, 650 truncate_size); 651 ci->i_truncate_size = truncate_size; 652 } 653 654 if (queue_trunc) 655 ceph_fscache_invalidate(inode); 656 657 return queue_trunc; 658 } 659 660 void ceph_fill_file_time(struct inode *inode, int issued, 661 u64 time_warp_seq, struct timespec *ctime, 662 struct timespec *mtime, struct timespec *atime) 663 { 664 struct ceph_inode_info *ci = ceph_inode(inode); 665 int warn = 0; 666 667 if (issued & (CEPH_CAP_FILE_EXCL| 668 CEPH_CAP_FILE_WR| 669 CEPH_CAP_FILE_BUFFER| 670 CEPH_CAP_AUTH_EXCL| 671 CEPH_CAP_XATTR_EXCL)) { 672 if (timespec_compare(ctime, &inode->i_ctime) > 0) { 673 dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n", 674 inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec, 675 ctime->tv_sec, ctime->tv_nsec); 676 inode->i_ctime = *ctime; 677 } 678 if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) { 679 /* the MDS did a utimes() */ 680 dout("mtime %ld.%09ld -> %ld.%09ld " 681 "tw %d -> %d\n", 682 inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec, 683 mtime->tv_sec, mtime->tv_nsec, 684 ci->i_time_warp_seq, (int)time_warp_seq); 685 686 inode->i_mtime = *mtime; 687 inode->i_atime = *atime; 688 ci->i_time_warp_seq = time_warp_seq; 689 } else if (time_warp_seq == ci->i_time_warp_seq) { 690 /* nobody did utimes(); take the max */ 691 if (timespec_compare(mtime, &inode->i_mtime) > 0) { 692 dout("mtime %ld.%09ld -> %ld.%09ld inc\n", 693 inode->i_mtime.tv_sec, 694 inode->i_mtime.tv_nsec, 695 mtime->tv_sec, mtime->tv_nsec); 696 inode->i_mtime = *mtime; 697 } 698 if (timespec_compare(atime, &inode->i_atime) > 0) { 699 dout("atime %ld.%09ld -> %ld.%09ld inc\n", 700 inode->i_atime.tv_sec, 701 inode->i_atime.tv_nsec, 702 atime->tv_sec, atime->tv_nsec); 703 inode->i_atime = *atime; 704 } 705 } else if (issued & CEPH_CAP_FILE_EXCL) { 706 /* we did a utimes(); ignore mds values */ 707 } else { 708 warn = 1; 709 } 710 } else { 711 /* we have no write|excl caps; whatever the MDS says is true */ 712 if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) { 713 inode->i_ctime = *ctime; 714 inode->i_mtime = *mtime; 715 inode->i_atime = *atime; 716 ci->i_time_warp_seq = time_warp_seq; 717 } else { 718 warn = 1; 719 } 720 } 721 if (warn) /* time_warp_seq shouldn't go backwards */ 722 dout("%p mds time_warp_seq %llu < %u\n", 723 inode, time_warp_seq, ci->i_time_warp_seq); 724 } 725 726 /* 727 * Populate an inode based on info from mds. May be called on new or 728 * existing inodes. 729 */ 730 static int fill_inode(struct inode *inode, struct page *locked_page, 731 struct ceph_mds_reply_info_in *iinfo, 732 struct ceph_mds_reply_dirfrag *dirinfo, 733 struct ceph_mds_session *session, 734 unsigned long ttl_from, int cap_fmode, 735 struct ceph_cap_reservation *caps_reservation) 736 { 737 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; 738 struct ceph_mds_reply_inode *info = iinfo->in; 739 struct ceph_inode_info *ci = ceph_inode(inode); 740 int issued = 0, implemented, new_issued; 741 struct timespec mtime, atime, ctime; 742 struct ceph_buffer *xattr_blob = NULL; 743 struct ceph_string *pool_ns = NULL; 744 struct ceph_cap *new_cap = NULL; 745 int err = 0; 746 bool wake = false; 747 bool queue_trunc = false; 748 bool new_version = false; 749 bool fill_inline = false; 750 751 dout("fill_inode %p ino %llx.%llx v %llu had %llu\n", 752 inode, ceph_vinop(inode), le64_to_cpu(info->version), 753 ci->i_version); 754 755 /* prealloc new cap struct */ 756 if (info->cap.caps && ceph_snap(inode) == CEPH_NOSNAP) 757 new_cap = ceph_get_cap(mdsc, caps_reservation); 758 759 /* 760 * prealloc xattr data, if it looks like we'll need it. only 761 * if len > 4 (meaning there are actually xattrs; the first 4 762 * bytes are the xattr count). 763 */ 764 if (iinfo->xattr_len > 4) { 765 xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS); 766 if (!xattr_blob) 767 pr_err("fill_inode ENOMEM xattr blob %d bytes\n", 768 iinfo->xattr_len); 769 } 770 771 if (iinfo->pool_ns_len > 0) 772 pool_ns = ceph_find_or_create_string(iinfo->pool_ns_data, 773 iinfo->pool_ns_len); 774 775 spin_lock(&ci->i_ceph_lock); 776 777 /* 778 * provided version will be odd if inode value is projected, 779 * even if stable. skip the update if we have newer stable 780 * info (ours>=theirs, e.g. due to racing mds replies), unless 781 * we are getting projected (unstable) info (in which case the 782 * version is odd, and we want ours>theirs). 783 * us them 784 * 2 2 skip 785 * 3 2 skip 786 * 3 3 update 787 */ 788 if (ci->i_version == 0 || 789 ((info->cap.flags & CEPH_CAP_FLAG_AUTH) && 790 le64_to_cpu(info->version) > (ci->i_version & ~1))) 791 new_version = true; 792 793 issued = __ceph_caps_issued(ci, &implemented); 794 issued |= implemented | __ceph_caps_dirty(ci); 795 new_issued = ~issued & le32_to_cpu(info->cap.caps); 796 797 /* update inode */ 798 ci->i_version = le64_to_cpu(info->version); 799 inode->i_version++; 800 inode->i_rdev = le32_to_cpu(info->rdev); 801 inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1; 802 803 if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) && 804 (issued & CEPH_CAP_AUTH_EXCL) == 0) { 805 inode->i_mode = le32_to_cpu(info->mode); 806 inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid)); 807 inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid)); 808 dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode, 809 from_kuid(&init_user_ns, inode->i_uid), 810 from_kgid(&init_user_ns, inode->i_gid)); 811 } 812 813 if ((new_version || (new_issued & CEPH_CAP_LINK_SHARED)) && 814 (issued & CEPH_CAP_LINK_EXCL) == 0) 815 set_nlink(inode, le32_to_cpu(info->nlink)); 816 817 if (new_version || (new_issued & CEPH_CAP_ANY_RD)) { 818 /* be careful with mtime, atime, size */ 819 ceph_decode_timespec(&atime, &info->atime); 820 ceph_decode_timespec(&mtime, &info->mtime); 821 ceph_decode_timespec(&ctime, &info->ctime); 822 ceph_fill_file_time(inode, issued, 823 le32_to_cpu(info->time_warp_seq), 824 &ctime, &mtime, &atime); 825 } 826 827 if (new_version || 828 (new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) { 829 s64 old_pool = ci->i_layout.pool_id; 830 struct ceph_string *old_ns; 831 832 ceph_file_layout_from_legacy(&ci->i_layout, &info->layout); 833 old_ns = rcu_dereference_protected(ci->i_layout.pool_ns, 834 lockdep_is_held(&ci->i_ceph_lock)); 835 rcu_assign_pointer(ci->i_layout.pool_ns, pool_ns); 836 837 if (ci->i_layout.pool_id != old_pool || pool_ns != old_ns) 838 ci->i_ceph_flags &= ~CEPH_I_POOL_PERM; 839 840 pool_ns = old_ns; 841 842 queue_trunc = ceph_fill_file_size(inode, issued, 843 le32_to_cpu(info->truncate_seq), 844 le64_to_cpu(info->truncate_size), 845 le64_to_cpu(info->size)); 846 /* only update max_size on auth cap */ 847 if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) && 848 ci->i_max_size != le64_to_cpu(info->max_size)) { 849 dout("max_size %lld -> %llu\n", ci->i_max_size, 850 le64_to_cpu(info->max_size)); 851 ci->i_max_size = le64_to_cpu(info->max_size); 852 } 853 } 854 855 /* xattrs */ 856 /* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */ 857 if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) && 858 le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) { 859 if (ci->i_xattrs.blob) 860 ceph_buffer_put(ci->i_xattrs.blob); 861 ci->i_xattrs.blob = xattr_blob; 862 if (xattr_blob) 863 memcpy(ci->i_xattrs.blob->vec.iov_base, 864 iinfo->xattr_data, iinfo->xattr_len); 865 ci->i_xattrs.version = le64_to_cpu(info->xattr_version); 866 ceph_forget_all_cached_acls(inode); 867 xattr_blob = NULL; 868 } 869 870 inode->i_mapping->a_ops = &ceph_aops; 871 872 switch (inode->i_mode & S_IFMT) { 873 case S_IFIFO: 874 case S_IFBLK: 875 case S_IFCHR: 876 case S_IFSOCK: 877 init_special_inode(inode, inode->i_mode, inode->i_rdev); 878 inode->i_op = &ceph_file_iops; 879 break; 880 case S_IFREG: 881 inode->i_op = &ceph_file_iops; 882 inode->i_fop = &ceph_file_fops; 883 break; 884 case S_IFLNK: 885 inode->i_op = &ceph_symlink_iops; 886 if (!ci->i_symlink) { 887 u32 symlen = iinfo->symlink_len; 888 char *sym; 889 890 spin_unlock(&ci->i_ceph_lock); 891 892 if (symlen != i_size_read(inode)) { 893 pr_err("fill_inode %llx.%llx BAD symlink " 894 "size %lld\n", ceph_vinop(inode), 895 i_size_read(inode)); 896 i_size_write(inode, symlen); 897 inode->i_blocks = calc_inode_blocks(symlen); 898 } 899 900 err = -ENOMEM; 901 sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS); 902 if (!sym) 903 goto out; 904 905 spin_lock(&ci->i_ceph_lock); 906 if (!ci->i_symlink) 907 ci->i_symlink = sym; 908 else 909 kfree(sym); /* lost a race */ 910 } 911 inode->i_link = ci->i_symlink; 912 break; 913 case S_IFDIR: 914 inode->i_op = &ceph_dir_iops; 915 inode->i_fop = &ceph_dir_fops; 916 917 ci->i_dir_layout = iinfo->dir_layout; 918 919 ci->i_files = le64_to_cpu(info->files); 920 ci->i_subdirs = le64_to_cpu(info->subdirs); 921 ci->i_rbytes = le64_to_cpu(info->rbytes); 922 ci->i_rfiles = le64_to_cpu(info->rfiles); 923 ci->i_rsubdirs = le64_to_cpu(info->rsubdirs); 924 ceph_decode_timespec(&ci->i_rctime, &info->rctime); 925 break; 926 default: 927 pr_err("fill_inode %llx.%llx BAD mode 0%o\n", 928 ceph_vinop(inode), inode->i_mode); 929 } 930 931 /* were we issued a capability? */ 932 if (info->cap.caps) { 933 if (ceph_snap(inode) == CEPH_NOSNAP) { 934 unsigned caps = le32_to_cpu(info->cap.caps); 935 ceph_add_cap(inode, session, 936 le64_to_cpu(info->cap.cap_id), 937 cap_fmode, caps, 938 le32_to_cpu(info->cap.wanted), 939 le32_to_cpu(info->cap.seq), 940 le32_to_cpu(info->cap.mseq), 941 le64_to_cpu(info->cap.realm), 942 info->cap.flags, &new_cap); 943 944 /* set dir completion flag? */ 945 if (S_ISDIR(inode->i_mode) && 946 ci->i_files == 0 && ci->i_subdirs == 0 && 947 (caps & CEPH_CAP_FILE_SHARED) && 948 (issued & CEPH_CAP_FILE_EXCL) == 0 && 949 !__ceph_dir_is_complete(ci)) { 950 dout(" marking %p complete (empty)\n", inode); 951 i_size_write(inode, 0); 952 __ceph_dir_set_complete(ci, 953 atomic64_read(&ci->i_release_count), 954 atomic64_read(&ci->i_ordered_count)); 955 } 956 957 wake = true; 958 } else { 959 dout(" %p got snap_caps %s\n", inode, 960 ceph_cap_string(le32_to_cpu(info->cap.caps))); 961 ci->i_snap_caps |= le32_to_cpu(info->cap.caps); 962 if (cap_fmode >= 0) 963 __ceph_get_fmode(ci, cap_fmode); 964 } 965 } else if (cap_fmode >= 0) { 966 pr_warn("mds issued no caps on %llx.%llx\n", 967 ceph_vinop(inode)); 968 __ceph_get_fmode(ci, cap_fmode); 969 } 970 971 if (iinfo->inline_version > 0 && 972 iinfo->inline_version >= ci->i_inline_version) { 973 int cache_caps = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO; 974 ci->i_inline_version = iinfo->inline_version; 975 if (ci->i_inline_version != CEPH_INLINE_NONE && 976 (locked_page || 977 (le32_to_cpu(info->cap.caps) & cache_caps))) 978 fill_inline = true; 979 } 980 981 spin_unlock(&ci->i_ceph_lock); 982 983 if (fill_inline) 984 ceph_fill_inline_data(inode, locked_page, 985 iinfo->inline_data, iinfo->inline_len); 986 987 if (wake) 988 wake_up_all(&ci->i_cap_wq); 989 990 /* queue truncate if we saw i_size decrease */ 991 if (queue_trunc) 992 ceph_queue_vmtruncate(inode); 993 994 /* populate frag tree */ 995 if (S_ISDIR(inode->i_mode)) 996 ceph_fill_fragtree(inode, &info->fragtree, dirinfo); 997 998 /* update delegation info? */ 999 if (dirinfo) 1000 ceph_fill_dirfrag(inode, dirinfo); 1001 1002 err = 0; 1003 out: 1004 if (new_cap) 1005 ceph_put_cap(mdsc, new_cap); 1006 if (xattr_blob) 1007 ceph_buffer_put(xattr_blob); 1008 ceph_put_string(pool_ns); 1009 return err; 1010 } 1011 1012 /* 1013 * caller should hold session s_mutex. 1014 */ 1015 static void update_dentry_lease(struct dentry *dentry, 1016 struct ceph_mds_reply_lease *lease, 1017 struct ceph_mds_session *session, 1018 unsigned long from_time) 1019 { 1020 struct ceph_dentry_info *di = ceph_dentry(dentry); 1021 long unsigned duration = le32_to_cpu(lease->duration_ms); 1022 long unsigned ttl = from_time + (duration * HZ) / 1000; 1023 long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000; 1024 struct inode *dir; 1025 1026 /* only track leases on regular dentries */ 1027 if (dentry->d_op != &ceph_dentry_ops) 1028 return; 1029 1030 spin_lock(&dentry->d_lock); 1031 dout("update_dentry_lease %p duration %lu ms ttl %lu\n", 1032 dentry, duration, ttl); 1033 1034 /* make lease_rdcache_gen match directory */ 1035 dir = d_inode(dentry->d_parent); 1036 di->lease_shared_gen = ceph_inode(dir)->i_shared_gen; 1037 1038 if (duration == 0) 1039 goto out_unlock; 1040 1041 if (di->lease_gen == session->s_cap_gen && 1042 time_before(ttl, di->time)) 1043 goto out_unlock; /* we already have a newer lease. */ 1044 1045 if (di->lease_session && di->lease_session != session) 1046 goto out_unlock; 1047 1048 ceph_dentry_lru_touch(dentry); 1049 1050 if (!di->lease_session) 1051 di->lease_session = ceph_get_mds_session(session); 1052 di->lease_gen = session->s_cap_gen; 1053 di->lease_seq = le32_to_cpu(lease->seq); 1054 di->lease_renew_after = half_ttl; 1055 di->lease_renew_from = 0; 1056 di->time = ttl; 1057 out_unlock: 1058 spin_unlock(&dentry->d_lock); 1059 return; 1060 } 1061 1062 /* 1063 * splice a dentry to an inode. 1064 * caller must hold directory i_mutex for this to be safe. 1065 */ 1066 static struct dentry *splice_dentry(struct dentry *dn, struct inode *in) 1067 { 1068 struct dentry *realdn; 1069 1070 BUG_ON(d_inode(dn)); 1071 1072 /* dn must be unhashed */ 1073 if (!d_unhashed(dn)) 1074 d_drop(dn); 1075 realdn = d_splice_alias(in, dn); 1076 if (IS_ERR(realdn)) { 1077 pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n", 1078 PTR_ERR(realdn), dn, in, ceph_vinop(in)); 1079 dn = realdn; /* note realdn contains the error */ 1080 goto out; 1081 } else if (realdn) { 1082 dout("dn %p (%d) spliced with %p (%d) " 1083 "inode %p ino %llx.%llx\n", 1084 dn, d_count(dn), 1085 realdn, d_count(realdn), 1086 d_inode(realdn), ceph_vinop(d_inode(realdn))); 1087 dput(dn); 1088 dn = realdn; 1089 } else { 1090 BUG_ON(!ceph_dentry(dn)); 1091 dout("dn %p attached to %p ino %llx.%llx\n", 1092 dn, d_inode(dn), ceph_vinop(d_inode(dn))); 1093 } 1094 out: 1095 return dn; 1096 } 1097 1098 /* 1099 * Incorporate results into the local cache. This is either just 1100 * one inode, or a directory, dentry, and possibly linked-to inode (e.g., 1101 * after a lookup). 1102 * 1103 * A reply may contain 1104 * a directory inode along with a dentry. 1105 * and/or a target inode 1106 * 1107 * Called with snap_rwsem (read). 1108 */ 1109 int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, 1110 struct ceph_mds_session *session) 1111 { 1112 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info; 1113 struct inode *in = NULL; 1114 struct ceph_vino vino; 1115 struct ceph_fs_client *fsc = ceph_sb_to_client(sb); 1116 int err = 0; 1117 1118 dout("fill_trace %p is_dentry %d is_target %d\n", req, 1119 rinfo->head->is_dentry, rinfo->head->is_target); 1120 1121 #if 0 1122 /* 1123 * Debugging hook: 1124 * 1125 * If we resend completed ops to a recovering mds, we get no 1126 * trace. Since that is very rare, pretend this is the case 1127 * to ensure the 'no trace' handlers in the callers behave. 1128 * 1129 * Fill in inodes unconditionally to avoid breaking cap 1130 * invariants. 1131 */ 1132 if (rinfo->head->op & CEPH_MDS_OP_WRITE) { 1133 pr_info("fill_trace faking empty trace on %lld %s\n", 1134 req->r_tid, ceph_mds_op_name(rinfo->head->op)); 1135 if (rinfo->head->is_dentry) { 1136 rinfo->head->is_dentry = 0; 1137 err = fill_inode(req->r_locked_dir, 1138 &rinfo->diri, rinfo->dirfrag, 1139 session, req->r_request_started, -1); 1140 } 1141 if (rinfo->head->is_target) { 1142 rinfo->head->is_target = 0; 1143 ininfo = rinfo->targeti.in; 1144 vino.ino = le64_to_cpu(ininfo->ino); 1145 vino.snap = le64_to_cpu(ininfo->snapid); 1146 in = ceph_get_inode(sb, vino); 1147 err = fill_inode(in, &rinfo->targeti, NULL, 1148 session, req->r_request_started, 1149 req->r_fmode); 1150 iput(in); 1151 } 1152 } 1153 #endif 1154 1155 if (!rinfo->head->is_target && !rinfo->head->is_dentry) { 1156 dout("fill_trace reply is empty!\n"); 1157 if (rinfo->head->result == 0 && req->r_locked_dir) 1158 ceph_invalidate_dir_request(req); 1159 return 0; 1160 } 1161 1162 if (rinfo->head->is_dentry) { 1163 struct inode *dir = req->r_locked_dir; 1164 1165 if (dir) { 1166 err = fill_inode(dir, NULL, 1167 &rinfo->diri, rinfo->dirfrag, 1168 session, req->r_request_started, -1, 1169 &req->r_caps_reservation); 1170 if (err < 0) 1171 goto done; 1172 } else { 1173 WARN_ON_ONCE(1); 1174 } 1175 1176 if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME) { 1177 struct qstr dname; 1178 struct dentry *dn, *parent; 1179 1180 BUG_ON(!rinfo->head->is_target); 1181 BUG_ON(req->r_dentry); 1182 1183 parent = d_find_any_alias(dir); 1184 BUG_ON(!parent); 1185 1186 dname.name = rinfo->dname; 1187 dname.len = rinfo->dname_len; 1188 dname.hash = full_name_hash(parent, dname.name, dname.len); 1189 vino.ino = le64_to_cpu(rinfo->targeti.in->ino); 1190 vino.snap = le64_to_cpu(rinfo->targeti.in->snapid); 1191 retry_lookup: 1192 dn = d_lookup(parent, &dname); 1193 dout("d_lookup on parent=%p name=%.*s got %p\n", 1194 parent, dname.len, dname.name, dn); 1195 1196 if (!dn) { 1197 dn = d_alloc(parent, &dname); 1198 dout("d_alloc %p '%.*s' = %p\n", parent, 1199 dname.len, dname.name, dn); 1200 if (dn == NULL) { 1201 dput(parent); 1202 err = -ENOMEM; 1203 goto done; 1204 } 1205 err = ceph_init_dentry(dn); 1206 if (err < 0) { 1207 dput(dn); 1208 dput(parent); 1209 goto done; 1210 } 1211 } else if (d_really_is_positive(dn) && 1212 (ceph_ino(d_inode(dn)) != vino.ino || 1213 ceph_snap(d_inode(dn)) != vino.snap)) { 1214 dout(" dn %p points to wrong inode %p\n", 1215 dn, d_inode(dn)); 1216 d_delete(dn); 1217 dput(dn); 1218 goto retry_lookup; 1219 } 1220 1221 req->r_dentry = dn; 1222 dput(parent); 1223 } 1224 } 1225 1226 if (rinfo->head->is_target) { 1227 vino.ino = le64_to_cpu(rinfo->targeti.in->ino); 1228 vino.snap = le64_to_cpu(rinfo->targeti.in->snapid); 1229 1230 in = ceph_get_inode(sb, vino); 1231 if (IS_ERR(in)) { 1232 err = PTR_ERR(in); 1233 goto done; 1234 } 1235 req->r_target_inode = in; 1236 1237 err = fill_inode(in, req->r_locked_page, &rinfo->targeti, NULL, 1238 session, req->r_request_started, 1239 (!req->r_aborted && rinfo->head->result == 0) ? 1240 req->r_fmode : -1, 1241 &req->r_caps_reservation); 1242 if (err < 0) { 1243 pr_err("fill_inode badness %p %llx.%llx\n", 1244 in, ceph_vinop(in)); 1245 goto done; 1246 } 1247 } 1248 1249 /* 1250 * ignore null lease/binding on snapdir ENOENT, or else we 1251 * will have trouble splicing in the virtual snapdir later 1252 */ 1253 if (rinfo->head->is_dentry && !req->r_aborted && 1254 req->r_locked_dir && 1255 (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name, 1256 fsc->mount_options->snapdir_name, 1257 req->r_dentry->d_name.len))) { 1258 /* 1259 * lookup link rename : null -> possibly existing inode 1260 * mknod symlink mkdir : null -> new inode 1261 * unlink : linked -> null 1262 */ 1263 struct inode *dir = req->r_locked_dir; 1264 struct dentry *dn = req->r_dentry; 1265 bool have_dir_cap, have_lease; 1266 1267 BUG_ON(!dn); 1268 BUG_ON(!dir); 1269 BUG_ON(d_inode(dn->d_parent) != dir); 1270 BUG_ON(ceph_ino(dir) != 1271 le64_to_cpu(rinfo->diri.in->ino)); 1272 BUG_ON(ceph_snap(dir) != 1273 le64_to_cpu(rinfo->diri.in->snapid)); 1274 1275 /* do we have a lease on the whole dir? */ 1276 have_dir_cap = 1277 (le32_to_cpu(rinfo->diri.in->cap.caps) & 1278 CEPH_CAP_FILE_SHARED); 1279 1280 /* do we have a dn lease? */ 1281 have_lease = have_dir_cap || 1282 le32_to_cpu(rinfo->dlease->duration_ms); 1283 if (!have_lease) 1284 dout("fill_trace no dentry lease or dir cap\n"); 1285 1286 /* rename? */ 1287 if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) { 1288 struct inode *olddir = req->r_old_dentry_dir; 1289 BUG_ON(!olddir); 1290 1291 dout(" src %p '%pd' dst %p '%pd'\n", 1292 req->r_old_dentry, 1293 req->r_old_dentry, 1294 dn, dn); 1295 dout("fill_trace doing d_move %p -> %p\n", 1296 req->r_old_dentry, dn); 1297 1298 /* d_move screws up sibling dentries' offsets */ 1299 ceph_dir_clear_ordered(dir); 1300 ceph_dir_clear_ordered(olddir); 1301 1302 d_move(req->r_old_dentry, dn); 1303 dout(" src %p '%pd' dst %p '%pd'\n", 1304 req->r_old_dentry, 1305 req->r_old_dentry, 1306 dn, dn); 1307 1308 /* ensure target dentry is invalidated, despite 1309 rehashing bug in vfs_rename_dir */ 1310 ceph_invalidate_dentry_lease(dn); 1311 1312 dout("dn %p gets new offset %lld\n", req->r_old_dentry, 1313 ceph_dentry(req->r_old_dentry)->offset); 1314 1315 dn = req->r_old_dentry; /* use old_dentry */ 1316 } 1317 1318 /* null dentry? */ 1319 if (!rinfo->head->is_target) { 1320 dout("fill_trace null dentry\n"); 1321 if (d_really_is_positive(dn)) { 1322 ceph_dir_clear_ordered(dir); 1323 dout("d_delete %p\n", dn); 1324 d_delete(dn); 1325 } else { 1326 if (have_lease && d_unhashed(dn)) 1327 d_add(dn, NULL); 1328 update_dentry_lease(dn, rinfo->dlease, 1329 session, 1330 req->r_request_started); 1331 } 1332 goto done; 1333 } 1334 1335 /* attach proper inode */ 1336 if (d_really_is_negative(dn)) { 1337 ceph_dir_clear_ordered(dir); 1338 ihold(in); 1339 dn = splice_dentry(dn, in); 1340 if (IS_ERR(dn)) { 1341 err = PTR_ERR(dn); 1342 goto done; 1343 } 1344 req->r_dentry = dn; /* may have spliced */ 1345 } else if (d_really_is_positive(dn) && d_inode(dn) != in) { 1346 dout(" %p links to %p %llx.%llx, not %llx.%llx\n", 1347 dn, d_inode(dn), ceph_vinop(d_inode(dn)), 1348 ceph_vinop(in)); 1349 d_invalidate(dn); 1350 have_lease = false; 1351 } 1352 1353 if (have_lease) 1354 update_dentry_lease(dn, rinfo->dlease, session, 1355 req->r_request_started); 1356 dout(" final dn %p\n", dn); 1357 } else if (!req->r_aborted && 1358 (req->r_op == CEPH_MDS_OP_LOOKUPSNAP || 1359 req->r_op == CEPH_MDS_OP_MKSNAP)) { 1360 struct dentry *dn = req->r_dentry; 1361 struct inode *dir = req->r_locked_dir; 1362 1363 /* fill out a snapdir LOOKUPSNAP dentry */ 1364 BUG_ON(!dn); 1365 BUG_ON(!dir); 1366 BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR); 1367 dout(" linking snapped dir %p to dn %p\n", in, dn); 1368 ceph_dir_clear_ordered(dir); 1369 ihold(in); 1370 dn = splice_dentry(dn, in); 1371 if (IS_ERR(dn)) { 1372 err = PTR_ERR(dn); 1373 goto done; 1374 } 1375 req->r_dentry = dn; /* may have spliced */ 1376 } 1377 done: 1378 dout("fill_trace done err=%d\n", err); 1379 return err; 1380 } 1381 1382 /* 1383 * Prepopulate our cache with readdir results, leases, etc. 1384 */ 1385 static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req, 1386 struct ceph_mds_session *session) 1387 { 1388 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info; 1389 int i, err = 0; 1390 1391 for (i = 0; i < rinfo->dir_nr; i++) { 1392 struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i; 1393 struct ceph_vino vino; 1394 struct inode *in; 1395 int rc; 1396 1397 vino.ino = le64_to_cpu(rde->inode.in->ino); 1398 vino.snap = le64_to_cpu(rde->inode.in->snapid); 1399 1400 in = ceph_get_inode(req->r_dentry->d_sb, vino); 1401 if (IS_ERR(in)) { 1402 err = PTR_ERR(in); 1403 dout("new_inode badness got %d\n", err); 1404 continue; 1405 } 1406 rc = fill_inode(in, NULL, &rde->inode, NULL, session, 1407 req->r_request_started, -1, 1408 &req->r_caps_reservation); 1409 if (rc < 0) { 1410 pr_err("fill_inode badness on %p got %d\n", in, rc); 1411 err = rc; 1412 } 1413 iput(in); 1414 } 1415 1416 return err; 1417 } 1418 1419 void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl) 1420 { 1421 if (ctl->page) { 1422 kunmap(ctl->page); 1423 put_page(ctl->page); 1424 ctl->page = NULL; 1425 } 1426 } 1427 1428 static int fill_readdir_cache(struct inode *dir, struct dentry *dn, 1429 struct ceph_readdir_cache_control *ctl, 1430 struct ceph_mds_request *req) 1431 { 1432 struct ceph_inode_info *ci = ceph_inode(dir); 1433 unsigned nsize = PAGE_SIZE / sizeof(struct dentry*); 1434 unsigned idx = ctl->index % nsize; 1435 pgoff_t pgoff = ctl->index / nsize; 1436 1437 if (!ctl->page || pgoff != page_index(ctl->page)) { 1438 ceph_readdir_cache_release(ctl); 1439 if (idx == 0) 1440 ctl->page = grab_cache_page(&dir->i_data, pgoff); 1441 else 1442 ctl->page = find_lock_page(&dir->i_data, pgoff); 1443 if (!ctl->page) { 1444 ctl->index = -1; 1445 return idx == 0 ? -ENOMEM : 0; 1446 } 1447 /* reading/filling the cache are serialized by 1448 * i_mutex, no need to use page lock */ 1449 unlock_page(ctl->page); 1450 ctl->dentries = kmap(ctl->page); 1451 if (idx == 0) 1452 memset(ctl->dentries, 0, PAGE_SIZE); 1453 } 1454 1455 if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) && 1456 req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) { 1457 dout("readdir cache dn %p idx %d\n", dn, ctl->index); 1458 ctl->dentries[idx] = dn; 1459 ctl->index++; 1460 } else { 1461 dout("disable readdir cache\n"); 1462 ctl->index = -1; 1463 } 1464 return 0; 1465 } 1466 1467 int ceph_readdir_prepopulate(struct ceph_mds_request *req, 1468 struct ceph_mds_session *session) 1469 { 1470 struct dentry *parent = req->r_dentry; 1471 struct ceph_inode_info *ci = ceph_inode(d_inode(parent)); 1472 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info; 1473 struct qstr dname; 1474 struct dentry *dn; 1475 struct inode *in; 1476 int err = 0, skipped = 0, ret, i; 1477 struct inode *snapdir = NULL; 1478 struct ceph_mds_request_head *rhead = req->r_request->front.iov_base; 1479 u32 frag = le32_to_cpu(rhead->args.readdir.frag); 1480 u32 last_hash = 0; 1481 u32 fpos_offset; 1482 struct ceph_readdir_cache_control cache_ctl = {}; 1483 1484 if (req->r_aborted) 1485 return readdir_prepopulate_inodes_only(req, session); 1486 1487 if (rinfo->hash_order && req->r_path2) { 1488 last_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash, 1489 req->r_path2, strlen(req->r_path2)); 1490 last_hash = ceph_frag_value(last_hash); 1491 } 1492 1493 if (rinfo->dir_dir && 1494 le32_to_cpu(rinfo->dir_dir->frag) != frag) { 1495 dout("readdir_prepopulate got new frag %x -> %x\n", 1496 frag, le32_to_cpu(rinfo->dir_dir->frag)); 1497 frag = le32_to_cpu(rinfo->dir_dir->frag); 1498 if (!rinfo->hash_order) 1499 req->r_readdir_offset = 2; 1500 } 1501 1502 if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) { 1503 snapdir = ceph_get_snapdir(d_inode(parent)); 1504 parent = d_find_alias(snapdir); 1505 dout("readdir_prepopulate %d items under SNAPDIR dn %p\n", 1506 rinfo->dir_nr, parent); 1507 } else { 1508 dout("readdir_prepopulate %d items under dn %p\n", 1509 rinfo->dir_nr, parent); 1510 if (rinfo->dir_dir) 1511 ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir); 1512 } 1513 1514 if (ceph_frag_is_leftmost(frag) && req->r_readdir_offset == 2 && 1515 !(rinfo->hash_order && req->r_path2)) { 1516 /* note dir version at start of readdir so we can tell 1517 * if any dentries get dropped */ 1518 req->r_dir_release_cnt = atomic64_read(&ci->i_release_count); 1519 req->r_dir_ordered_cnt = atomic64_read(&ci->i_ordered_count); 1520 req->r_readdir_cache_idx = 0; 1521 } 1522 1523 cache_ctl.index = req->r_readdir_cache_idx; 1524 fpos_offset = req->r_readdir_offset; 1525 1526 /* FIXME: release caps/leases if error occurs */ 1527 for (i = 0; i < rinfo->dir_nr; i++) { 1528 struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i; 1529 struct ceph_vino vino; 1530 1531 dname.name = rde->name; 1532 dname.len = rde->name_len; 1533 dname.hash = full_name_hash(parent, dname.name, dname.len); 1534 1535 vino.ino = le64_to_cpu(rde->inode.in->ino); 1536 vino.snap = le64_to_cpu(rde->inode.in->snapid); 1537 1538 if (rinfo->hash_order) { 1539 u32 hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash, 1540 rde->name, rde->name_len); 1541 hash = ceph_frag_value(hash); 1542 if (hash != last_hash) 1543 fpos_offset = 2; 1544 last_hash = hash; 1545 rde->offset = ceph_make_fpos(hash, fpos_offset++, true); 1546 } else { 1547 rde->offset = ceph_make_fpos(frag, fpos_offset++, false); 1548 } 1549 1550 retry_lookup: 1551 dn = d_lookup(parent, &dname); 1552 dout("d_lookup on parent=%p name=%.*s got %p\n", 1553 parent, dname.len, dname.name, dn); 1554 1555 if (!dn) { 1556 dn = d_alloc(parent, &dname); 1557 dout("d_alloc %p '%.*s' = %p\n", parent, 1558 dname.len, dname.name, dn); 1559 if (dn == NULL) { 1560 dout("d_alloc badness\n"); 1561 err = -ENOMEM; 1562 goto out; 1563 } 1564 ret = ceph_init_dentry(dn); 1565 if (ret < 0) { 1566 dput(dn); 1567 err = ret; 1568 goto out; 1569 } 1570 } else if (d_really_is_positive(dn) && 1571 (ceph_ino(d_inode(dn)) != vino.ino || 1572 ceph_snap(d_inode(dn)) != vino.snap)) { 1573 dout(" dn %p points to wrong inode %p\n", 1574 dn, d_inode(dn)); 1575 d_delete(dn); 1576 dput(dn); 1577 goto retry_lookup; 1578 } 1579 1580 /* inode */ 1581 if (d_really_is_positive(dn)) { 1582 in = d_inode(dn); 1583 } else { 1584 in = ceph_get_inode(parent->d_sb, vino); 1585 if (IS_ERR(in)) { 1586 dout("new_inode badness\n"); 1587 d_drop(dn); 1588 dput(dn); 1589 err = PTR_ERR(in); 1590 goto out; 1591 } 1592 } 1593 1594 ret = fill_inode(in, NULL, &rde->inode, NULL, session, 1595 req->r_request_started, -1, 1596 &req->r_caps_reservation); 1597 if (ret < 0) { 1598 pr_err("fill_inode badness on %p\n", in); 1599 if (d_really_is_negative(dn)) 1600 iput(in); 1601 d_drop(dn); 1602 err = ret; 1603 goto next_item; 1604 } 1605 1606 if (d_really_is_negative(dn)) { 1607 struct dentry *realdn; 1608 1609 if (ceph_security_xattr_deadlock(in)) { 1610 dout(" skip splicing dn %p to inode %p" 1611 " (security xattr deadlock)\n", dn, in); 1612 iput(in); 1613 skipped++; 1614 goto next_item; 1615 } 1616 1617 realdn = splice_dentry(dn, in); 1618 if (IS_ERR(realdn)) { 1619 err = PTR_ERR(realdn); 1620 d_drop(dn); 1621 dn = NULL; 1622 goto next_item; 1623 } 1624 dn = realdn; 1625 } 1626 1627 ceph_dentry(dn)->offset = rde->offset; 1628 1629 update_dentry_lease(dn, rde->lease, req->r_session, 1630 req->r_request_started); 1631 1632 if (err == 0 && skipped == 0 && cache_ctl.index >= 0) { 1633 ret = fill_readdir_cache(d_inode(parent), dn, 1634 &cache_ctl, req); 1635 if (ret < 0) 1636 err = ret; 1637 } 1638 next_item: 1639 if (dn) 1640 dput(dn); 1641 } 1642 out: 1643 if (err == 0 && skipped == 0) { 1644 req->r_did_prepopulate = true; 1645 req->r_readdir_cache_idx = cache_ctl.index; 1646 } 1647 ceph_readdir_cache_release(&cache_ctl); 1648 if (snapdir) { 1649 iput(snapdir); 1650 dput(parent); 1651 } 1652 dout("readdir_prepopulate done\n"); 1653 return err; 1654 } 1655 1656 int ceph_inode_set_size(struct inode *inode, loff_t size) 1657 { 1658 struct ceph_inode_info *ci = ceph_inode(inode); 1659 int ret = 0; 1660 1661 spin_lock(&ci->i_ceph_lock); 1662 dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size); 1663 i_size_write(inode, size); 1664 inode->i_blocks = calc_inode_blocks(size); 1665 1666 /* tell the MDS if we are approaching max_size */ 1667 if ((size << 1) >= ci->i_max_size && 1668 (ci->i_reported_size << 1) < ci->i_max_size) 1669 ret = 1; 1670 1671 spin_unlock(&ci->i_ceph_lock); 1672 return ret; 1673 } 1674 1675 /* 1676 * Write back inode data in a worker thread. (This can't be done 1677 * in the message handler context.) 1678 */ 1679 void ceph_queue_writeback(struct inode *inode) 1680 { 1681 ihold(inode); 1682 if (queue_work(ceph_inode_to_client(inode)->wb_wq, 1683 &ceph_inode(inode)->i_wb_work)) { 1684 dout("ceph_queue_writeback %p\n", inode); 1685 } else { 1686 dout("ceph_queue_writeback %p failed\n", inode); 1687 iput(inode); 1688 } 1689 } 1690 1691 static void ceph_writeback_work(struct work_struct *work) 1692 { 1693 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, 1694 i_wb_work); 1695 struct inode *inode = &ci->vfs_inode; 1696 1697 dout("writeback %p\n", inode); 1698 filemap_fdatawrite(&inode->i_data); 1699 iput(inode); 1700 } 1701 1702 /* 1703 * queue an async invalidation 1704 */ 1705 void ceph_queue_invalidate(struct inode *inode) 1706 { 1707 ihold(inode); 1708 if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq, 1709 &ceph_inode(inode)->i_pg_inv_work)) { 1710 dout("ceph_queue_invalidate %p\n", inode); 1711 } else { 1712 dout("ceph_queue_invalidate %p failed\n", inode); 1713 iput(inode); 1714 } 1715 } 1716 1717 /* 1718 * Invalidate inode pages in a worker thread. (This can't be done 1719 * in the message handler context.) 1720 */ 1721 static void ceph_invalidate_work(struct work_struct *work) 1722 { 1723 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, 1724 i_pg_inv_work); 1725 struct inode *inode = &ci->vfs_inode; 1726 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 1727 u32 orig_gen; 1728 int check = 0; 1729 1730 mutex_lock(&ci->i_truncate_mutex); 1731 1732 if (ACCESS_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) { 1733 pr_warn_ratelimited("invalidate_pages %p %lld forced umount\n", 1734 inode, ceph_ino(inode)); 1735 mapping_set_error(inode->i_mapping, -EIO); 1736 truncate_pagecache(inode, 0); 1737 mutex_unlock(&ci->i_truncate_mutex); 1738 goto out; 1739 } 1740 1741 spin_lock(&ci->i_ceph_lock); 1742 dout("invalidate_pages %p gen %d revoking %d\n", inode, 1743 ci->i_rdcache_gen, ci->i_rdcache_revoking); 1744 if (ci->i_rdcache_revoking != ci->i_rdcache_gen) { 1745 if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE)) 1746 check = 1; 1747 spin_unlock(&ci->i_ceph_lock); 1748 mutex_unlock(&ci->i_truncate_mutex); 1749 goto out; 1750 } 1751 orig_gen = ci->i_rdcache_gen; 1752 spin_unlock(&ci->i_ceph_lock); 1753 1754 if (invalidate_inode_pages2(inode->i_mapping) < 0) { 1755 pr_err("invalidate_pages %p fails\n", inode); 1756 } 1757 1758 spin_lock(&ci->i_ceph_lock); 1759 if (orig_gen == ci->i_rdcache_gen && 1760 orig_gen == ci->i_rdcache_revoking) { 1761 dout("invalidate_pages %p gen %d successful\n", inode, 1762 ci->i_rdcache_gen); 1763 ci->i_rdcache_revoking--; 1764 check = 1; 1765 } else { 1766 dout("invalidate_pages %p gen %d raced, now %d revoking %d\n", 1767 inode, orig_gen, ci->i_rdcache_gen, 1768 ci->i_rdcache_revoking); 1769 if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE)) 1770 check = 1; 1771 } 1772 spin_unlock(&ci->i_ceph_lock); 1773 mutex_unlock(&ci->i_truncate_mutex); 1774 out: 1775 if (check) 1776 ceph_check_caps(ci, 0, NULL); 1777 iput(inode); 1778 } 1779 1780 1781 /* 1782 * called by trunc_wq; 1783 * 1784 * We also truncate in a separate thread as well. 1785 */ 1786 static void ceph_vmtruncate_work(struct work_struct *work) 1787 { 1788 struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info, 1789 i_vmtruncate_work); 1790 struct inode *inode = &ci->vfs_inode; 1791 1792 dout("vmtruncate_work %p\n", inode); 1793 __ceph_do_pending_vmtruncate(inode); 1794 iput(inode); 1795 } 1796 1797 /* 1798 * Queue an async vmtruncate. If we fail to queue work, we will handle 1799 * the truncation the next time we call __ceph_do_pending_vmtruncate. 1800 */ 1801 void ceph_queue_vmtruncate(struct inode *inode) 1802 { 1803 struct ceph_inode_info *ci = ceph_inode(inode); 1804 1805 ihold(inode); 1806 1807 if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq, 1808 &ci->i_vmtruncate_work)) { 1809 dout("ceph_queue_vmtruncate %p\n", inode); 1810 } else { 1811 dout("ceph_queue_vmtruncate %p failed, pending=%d\n", 1812 inode, ci->i_truncate_pending); 1813 iput(inode); 1814 } 1815 } 1816 1817 /* 1818 * Make sure any pending truncation is applied before doing anything 1819 * that may depend on it. 1820 */ 1821 void __ceph_do_pending_vmtruncate(struct inode *inode) 1822 { 1823 struct ceph_inode_info *ci = ceph_inode(inode); 1824 u64 to; 1825 int wrbuffer_refs, finish = 0; 1826 1827 mutex_lock(&ci->i_truncate_mutex); 1828 retry: 1829 spin_lock(&ci->i_ceph_lock); 1830 if (ci->i_truncate_pending == 0) { 1831 dout("__do_pending_vmtruncate %p none pending\n", inode); 1832 spin_unlock(&ci->i_ceph_lock); 1833 mutex_unlock(&ci->i_truncate_mutex); 1834 return; 1835 } 1836 1837 /* 1838 * make sure any dirty snapped pages are flushed before we 1839 * possibly truncate them.. so write AND block! 1840 */ 1841 if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) { 1842 dout("__do_pending_vmtruncate %p flushing snaps first\n", 1843 inode); 1844 spin_unlock(&ci->i_ceph_lock); 1845 filemap_write_and_wait_range(&inode->i_data, 0, 1846 inode->i_sb->s_maxbytes); 1847 goto retry; 1848 } 1849 1850 /* there should be no reader or writer */ 1851 WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref); 1852 1853 to = ci->i_truncate_size; 1854 wrbuffer_refs = ci->i_wrbuffer_ref; 1855 dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode, 1856 ci->i_truncate_pending, to); 1857 spin_unlock(&ci->i_ceph_lock); 1858 1859 truncate_pagecache(inode, to); 1860 1861 spin_lock(&ci->i_ceph_lock); 1862 if (to == ci->i_truncate_size) { 1863 ci->i_truncate_pending = 0; 1864 finish = 1; 1865 } 1866 spin_unlock(&ci->i_ceph_lock); 1867 if (!finish) 1868 goto retry; 1869 1870 mutex_unlock(&ci->i_truncate_mutex); 1871 1872 if (wrbuffer_refs == 0) 1873 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL); 1874 1875 wake_up_all(&ci->i_cap_wq); 1876 } 1877 1878 /* 1879 * symlinks 1880 */ 1881 static const struct inode_operations ceph_symlink_iops = { 1882 .readlink = generic_readlink, 1883 .get_link = simple_get_link, 1884 .setattr = ceph_setattr, 1885 .getattr = ceph_getattr, 1886 .listxattr = ceph_listxattr, 1887 }; 1888 1889 int __ceph_setattr(struct inode *inode, struct iattr *attr) 1890 { 1891 struct ceph_inode_info *ci = ceph_inode(inode); 1892 const unsigned int ia_valid = attr->ia_valid; 1893 struct ceph_mds_request *req; 1894 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; 1895 struct ceph_cap_flush *prealloc_cf; 1896 int issued; 1897 int release = 0, dirtied = 0; 1898 int mask = 0; 1899 int err = 0; 1900 int inode_dirty_flags = 0; 1901 bool lock_snap_rwsem = false; 1902 1903 prealloc_cf = ceph_alloc_cap_flush(); 1904 if (!prealloc_cf) 1905 return -ENOMEM; 1906 1907 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR, 1908 USE_AUTH_MDS); 1909 if (IS_ERR(req)) { 1910 ceph_free_cap_flush(prealloc_cf); 1911 return PTR_ERR(req); 1912 } 1913 1914 spin_lock(&ci->i_ceph_lock); 1915 issued = __ceph_caps_issued(ci, NULL); 1916 1917 if (!ci->i_head_snapc && 1918 (issued & (CEPH_CAP_ANY_EXCL | CEPH_CAP_FILE_WR))) { 1919 lock_snap_rwsem = true; 1920 if (!down_read_trylock(&mdsc->snap_rwsem)) { 1921 spin_unlock(&ci->i_ceph_lock); 1922 down_read(&mdsc->snap_rwsem); 1923 spin_lock(&ci->i_ceph_lock); 1924 issued = __ceph_caps_issued(ci, NULL); 1925 } 1926 } 1927 1928 dout("setattr %p issued %s\n", inode, ceph_cap_string(issued)); 1929 1930 if (ia_valid & ATTR_UID) { 1931 dout("setattr %p uid %d -> %d\n", inode, 1932 from_kuid(&init_user_ns, inode->i_uid), 1933 from_kuid(&init_user_ns, attr->ia_uid)); 1934 if (issued & CEPH_CAP_AUTH_EXCL) { 1935 inode->i_uid = attr->ia_uid; 1936 dirtied |= CEPH_CAP_AUTH_EXCL; 1937 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 || 1938 !uid_eq(attr->ia_uid, inode->i_uid)) { 1939 req->r_args.setattr.uid = cpu_to_le32( 1940 from_kuid(&init_user_ns, attr->ia_uid)); 1941 mask |= CEPH_SETATTR_UID; 1942 release |= CEPH_CAP_AUTH_SHARED; 1943 } 1944 } 1945 if (ia_valid & ATTR_GID) { 1946 dout("setattr %p gid %d -> %d\n", inode, 1947 from_kgid(&init_user_ns, inode->i_gid), 1948 from_kgid(&init_user_ns, attr->ia_gid)); 1949 if (issued & CEPH_CAP_AUTH_EXCL) { 1950 inode->i_gid = attr->ia_gid; 1951 dirtied |= CEPH_CAP_AUTH_EXCL; 1952 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 || 1953 !gid_eq(attr->ia_gid, inode->i_gid)) { 1954 req->r_args.setattr.gid = cpu_to_le32( 1955 from_kgid(&init_user_ns, attr->ia_gid)); 1956 mask |= CEPH_SETATTR_GID; 1957 release |= CEPH_CAP_AUTH_SHARED; 1958 } 1959 } 1960 if (ia_valid & ATTR_MODE) { 1961 dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode, 1962 attr->ia_mode); 1963 if (issued & CEPH_CAP_AUTH_EXCL) { 1964 inode->i_mode = attr->ia_mode; 1965 dirtied |= CEPH_CAP_AUTH_EXCL; 1966 } else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 || 1967 attr->ia_mode != inode->i_mode) { 1968 inode->i_mode = attr->ia_mode; 1969 req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode); 1970 mask |= CEPH_SETATTR_MODE; 1971 release |= CEPH_CAP_AUTH_SHARED; 1972 } 1973 } 1974 1975 if (ia_valid & ATTR_ATIME) { 1976 dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode, 1977 inode->i_atime.tv_sec, inode->i_atime.tv_nsec, 1978 attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec); 1979 if (issued & CEPH_CAP_FILE_EXCL) { 1980 ci->i_time_warp_seq++; 1981 inode->i_atime = attr->ia_atime; 1982 dirtied |= CEPH_CAP_FILE_EXCL; 1983 } else if ((issued & CEPH_CAP_FILE_WR) && 1984 timespec_compare(&inode->i_atime, 1985 &attr->ia_atime) < 0) { 1986 inode->i_atime = attr->ia_atime; 1987 dirtied |= CEPH_CAP_FILE_WR; 1988 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 || 1989 !timespec_equal(&inode->i_atime, &attr->ia_atime)) { 1990 ceph_encode_timespec(&req->r_args.setattr.atime, 1991 &attr->ia_atime); 1992 mask |= CEPH_SETATTR_ATIME; 1993 release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD | 1994 CEPH_CAP_FILE_WR; 1995 } 1996 } 1997 if (ia_valid & ATTR_MTIME) { 1998 dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode, 1999 inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec, 2000 attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec); 2001 if (issued & CEPH_CAP_FILE_EXCL) { 2002 ci->i_time_warp_seq++; 2003 inode->i_mtime = attr->ia_mtime; 2004 dirtied |= CEPH_CAP_FILE_EXCL; 2005 } else if ((issued & CEPH_CAP_FILE_WR) && 2006 timespec_compare(&inode->i_mtime, 2007 &attr->ia_mtime) < 0) { 2008 inode->i_mtime = attr->ia_mtime; 2009 dirtied |= CEPH_CAP_FILE_WR; 2010 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 || 2011 !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) { 2012 ceph_encode_timespec(&req->r_args.setattr.mtime, 2013 &attr->ia_mtime); 2014 mask |= CEPH_SETATTR_MTIME; 2015 release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD | 2016 CEPH_CAP_FILE_WR; 2017 } 2018 } 2019 if (ia_valid & ATTR_SIZE) { 2020 dout("setattr %p size %lld -> %lld\n", inode, 2021 inode->i_size, attr->ia_size); 2022 if ((issued & CEPH_CAP_FILE_EXCL) && 2023 attr->ia_size > inode->i_size) { 2024 i_size_write(inode, attr->ia_size); 2025 inode->i_blocks = calc_inode_blocks(attr->ia_size); 2026 inode->i_ctime = attr->ia_ctime; 2027 ci->i_reported_size = attr->ia_size; 2028 dirtied |= CEPH_CAP_FILE_EXCL; 2029 } else if ((issued & CEPH_CAP_FILE_SHARED) == 0 || 2030 attr->ia_size != inode->i_size) { 2031 req->r_args.setattr.size = cpu_to_le64(attr->ia_size); 2032 req->r_args.setattr.old_size = 2033 cpu_to_le64(inode->i_size); 2034 mask |= CEPH_SETATTR_SIZE; 2035 release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD | 2036 CEPH_CAP_FILE_WR; 2037 } 2038 } 2039 2040 /* these do nothing */ 2041 if (ia_valid & ATTR_CTIME) { 2042 bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME| 2043 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0; 2044 dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode, 2045 inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec, 2046 attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec, 2047 only ? "ctime only" : "ignored"); 2048 inode->i_ctime = attr->ia_ctime; 2049 if (only) { 2050 /* 2051 * if kernel wants to dirty ctime but nothing else, 2052 * we need to choose a cap to dirty under, or do 2053 * a almost-no-op setattr 2054 */ 2055 if (issued & CEPH_CAP_AUTH_EXCL) 2056 dirtied |= CEPH_CAP_AUTH_EXCL; 2057 else if (issued & CEPH_CAP_FILE_EXCL) 2058 dirtied |= CEPH_CAP_FILE_EXCL; 2059 else if (issued & CEPH_CAP_XATTR_EXCL) 2060 dirtied |= CEPH_CAP_XATTR_EXCL; 2061 else 2062 mask |= CEPH_SETATTR_CTIME; 2063 } 2064 } 2065 if (ia_valid & ATTR_FILE) 2066 dout("setattr %p ATTR_FILE ... hrm!\n", inode); 2067 2068 if (dirtied) { 2069 inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied, 2070 &prealloc_cf); 2071 inode->i_ctime = current_time(inode); 2072 } 2073 2074 release &= issued; 2075 spin_unlock(&ci->i_ceph_lock); 2076 if (lock_snap_rwsem) 2077 up_read(&mdsc->snap_rwsem); 2078 2079 if (inode_dirty_flags) 2080 __mark_inode_dirty(inode, inode_dirty_flags); 2081 2082 if (ia_valid & ATTR_MODE) { 2083 err = posix_acl_chmod(inode, attr->ia_mode); 2084 if (err) 2085 goto out_put; 2086 } 2087 2088 if (mask) { 2089 req->r_inode = inode; 2090 ihold(inode); 2091 req->r_inode_drop = release; 2092 req->r_args.setattr.mask = cpu_to_le32(mask); 2093 req->r_num_caps = 1; 2094 err = ceph_mdsc_do_request(mdsc, NULL, req); 2095 } 2096 dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err, 2097 ceph_cap_string(dirtied), mask); 2098 2099 ceph_mdsc_put_request(req); 2100 if (mask & CEPH_SETATTR_SIZE) 2101 __ceph_do_pending_vmtruncate(inode); 2102 ceph_free_cap_flush(prealloc_cf); 2103 return err; 2104 out_put: 2105 ceph_mdsc_put_request(req); 2106 ceph_free_cap_flush(prealloc_cf); 2107 return err; 2108 } 2109 2110 /* 2111 * setattr 2112 */ 2113 int ceph_setattr(struct dentry *dentry, struct iattr *attr) 2114 { 2115 struct inode *inode = d_inode(dentry); 2116 int err; 2117 2118 if (ceph_snap(inode) != CEPH_NOSNAP) 2119 return -EROFS; 2120 2121 err = setattr_prepare(dentry, attr); 2122 if (err != 0) 2123 return err; 2124 2125 return __ceph_setattr(inode, attr); 2126 } 2127 2128 /* 2129 * Verify that we have a lease on the given mask. If not, 2130 * do a getattr against an mds. 2131 */ 2132 int __ceph_do_getattr(struct inode *inode, struct page *locked_page, 2133 int mask, bool force) 2134 { 2135 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb); 2136 struct ceph_mds_client *mdsc = fsc->mdsc; 2137 struct ceph_mds_request *req; 2138 int err; 2139 2140 if (ceph_snap(inode) == CEPH_SNAPDIR) { 2141 dout("do_getattr inode %p SNAPDIR\n", inode); 2142 return 0; 2143 } 2144 2145 dout("do_getattr inode %p mask %s mode 0%o\n", 2146 inode, ceph_cap_string(mask), inode->i_mode); 2147 if (!force && ceph_caps_issued_mask(ceph_inode(inode), mask, 1)) 2148 return 0; 2149 2150 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS); 2151 if (IS_ERR(req)) 2152 return PTR_ERR(req); 2153 req->r_inode = inode; 2154 ihold(inode); 2155 req->r_num_caps = 1; 2156 req->r_args.getattr.mask = cpu_to_le32(mask); 2157 req->r_locked_page = locked_page; 2158 err = ceph_mdsc_do_request(mdsc, NULL, req); 2159 if (locked_page && err == 0) { 2160 u64 inline_version = req->r_reply_info.targeti.inline_version; 2161 if (inline_version == 0) { 2162 /* the reply is supposed to contain inline data */ 2163 err = -EINVAL; 2164 } else if (inline_version == CEPH_INLINE_NONE) { 2165 err = -ENODATA; 2166 } else { 2167 err = req->r_reply_info.targeti.inline_len; 2168 } 2169 } 2170 ceph_mdsc_put_request(req); 2171 dout("do_getattr result=%d\n", err); 2172 return err; 2173 } 2174 2175 2176 /* 2177 * Check inode permissions. We verify we have a valid value for 2178 * the AUTH cap, then call the generic handler. 2179 */ 2180 int ceph_permission(struct inode *inode, int mask) 2181 { 2182 int err; 2183 2184 if (mask & MAY_NOT_BLOCK) 2185 return -ECHILD; 2186 2187 err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false); 2188 2189 if (!err) 2190 err = generic_permission(inode, mask); 2191 return err; 2192 } 2193 2194 /* 2195 * Get all attributes. Hopefully somedata we'll have a statlite() 2196 * and can limit the fields we require to be accurate. 2197 */ 2198 int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry, 2199 struct kstat *stat) 2200 { 2201 struct inode *inode = d_inode(dentry); 2202 struct ceph_inode_info *ci = ceph_inode(inode); 2203 int err; 2204 2205 err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL, false); 2206 if (!err) { 2207 generic_fillattr(inode, stat); 2208 stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino); 2209 if (ceph_snap(inode) != CEPH_NOSNAP) 2210 stat->dev = ceph_snap(inode); 2211 else 2212 stat->dev = 0; 2213 if (S_ISDIR(inode->i_mode)) { 2214 if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), 2215 RBYTES)) 2216 stat->size = ci->i_rbytes; 2217 else 2218 stat->size = ci->i_files + ci->i_subdirs; 2219 stat->blocks = 0; 2220 stat->blksize = 65536; 2221 } 2222 } 2223 return err; 2224 } 2225