#include <linux/ceph/ceph_debug.h>

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "super.h"
#include "mds_client.h"

/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path.  Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino).  The
 * exception to this is open_root_dentry(), which will open the mount
 * point by name.
 */

const struct dentry_operations ceph_dentry_ops;

/*
 * Initialize ceph dentry state.
 */
int ceph_init_dentry(struct dentry *dentry)
{
	struct ceph_dentry_info *di;

	if (dentry->d_fsdata)
		return 0;

	di = kmem_cache_alloc(ceph_dentry_cachep, GFP_KERNEL | __GFP_ZERO);
	if (!di)
		return -ENOMEM;          /* oh well */

	spin_lock(&dentry->d_lock);
	if (dentry->d_fsdata) {
		/* lost a race */
		kmem_cache_free(ceph_dentry_cachep, di);
		goto out_unlock;
	}

	if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_NOSNAP)
		d_set_d_op(dentry, &ceph_dentry_ops);
	else if (ceph_snap(d_inode(dentry->d_parent)) == CEPH_SNAPDIR)
		d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
	else
		d_set_d_op(dentry, &ceph_snap_dentry_ops);

	di->dentry = dentry;
	di->lease_session = NULL;
	dentry->d_time = jiffies;
	/* avoid reordering d_fsdata setup so that the check above is safe */
	smp_mb();
	dentry->d_fsdata = di;
	ceph_dentry_lru_add(dentry);
out_unlock:
	spin_unlock(&dentry->d_lock);
	return 0;
}

struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry)
{
	struct inode *inode = NULL;

	if (!dentry)
		return NULL;

	spin_lock(&dentry->d_lock);
	if (!IS_ROOT(dentry)) {
		inode = d_inode(dentry->d_parent);
		ihold(inode);
	}
	spin_unlock(&dentry->d_lock);
	return inode;
}


/*
 * for readdir, we encode the directory frag and offset within that
 * frag into f_pos.
 */
static unsigned fpos_frag(loff_t p)
{
	return p >> 32;
}
static unsigned fpos_off(loff_t p)
{
	return p & 0xffffffff;
}

static int fpos_cmp(loff_t l, loff_t r)
{
	int v = ceph_frag_compare(fpos_frag(l), fpos_frag(r));
	if (v)
		return v;
	return (int)(fpos_off(l) - fpos_off(r));
}
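
/*
 * For example (an illustrative sketch; see ceph_make_fpos() in super.h):
 * frag 0x2 at offset 5 is encoded as
 *
 *	f_pos = ((loff_t)0x2 << 32) | 5 = 0x0000000200000005
 *
 * so fpos_frag() recovers 0x2 and fpos_off() recovers 5.
 */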

/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
			    int len, unsigned next_offset)
{
	char *buf = kmalloc(len+1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	kfree(fi->last_name);
	fi->last_name = buf;
	memcpy(fi->last_name, name, len);
	fi->last_name[len] = 0;
	fi->next_offset = next_offset;
	dout("note_last_dentry '%s'\n", fi->last_name);
	return 0;
}

/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache.  We make this work by carefully ordering dentries on
 * d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * Complete dir indicates that we have all dentries in the dir.  It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
 */
static int __dcache_readdir(struct file *file, struct dir_context *ctx,
			    u32 shared_gen)
{
	struct ceph_file_info *fi = file->private_data;
	struct dentry *parent = file->f_path.dentry;
	struct inode *dir = d_inode(parent);
	struct dentry *dentry, *last = NULL;
	struct ceph_dentry_info *di;
	unsigned nsize = PAGE_CACHE_SIZE / sizeof(struct dentry *);
	int err = 0;
	loff_t ptr_pos = 0;
	struct ceph_readdir_cache_control cache_ctl = {};

	dout("__dcache_readdir %p v%u at %llu\n", dir, shared_gen, ctx->pos);

	/* we can calculate cache index for the first dirfrag */
	if (ceph_frag_is_leftmost(fpos_frag(ctx->pos))) {
		cache_ctl.index = fpos_off(ctx->pos) - 2;
		BUG_ON(cache_ctl.index < 0);
		ptr_pos = cache_ctl.index * sizeof(struct dentry *);
	}

	while (true) {
		pgoff_t pgoff;
		bool emit_dentry;

		if (ptr_pos >= i_size_read(dir)) {
			fi->flags |= CEPH_F_ATEND;
			err = 0;
			break;
		}

		err = -EAGAIN;
		pgoff = ptr_pos >> PAGE_CACHE_SHIFT;
		if (!cache_ctl.page || pgoff != page_index(cache_ctl.page)) {
			ceph_readdir_cache_release(&cache_ctl);
			cache_ctl.page = find_lock_page(&dir->i_data, pgoff);
			if (!cache_ctl.page) {
				dout(" page %lu not found\n", pgoff);
				break;
			}
			/* reading/filling the cache are serialized by
			 * i_mutex, no need to use page lock */
			unlock_page(cache_ctl.page);
			cache_ctl.dentries = kmap(cache_ctl.page);
		}

		rcu_read_lock();
		spin_lock(&parent->d_lock);
		/* check i_size again here, because empty directory can be
		 * marked as complete while not holding the i_mutex. */
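		/* the cache pages hold bare dentry pointers: they are only
		 * dereferenced under RCU, and lockref_get_not_dead() below
		 * upgrades to a real reference, failing if the dentry is
		 * already being torn down */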
		if (ceph_dir_is_complete_ordered(dir) &&
		    ptr_pos < i_size_read(dir))
			dentry = cache_ctl.dentries[cache_ctl.index % nsize];
		else
			dentry = NULL;
		spin_unlock(&parent->d_lock);
		if (dentry && !lockref_get_not_dead(&dentry->d_lockref))
			dentry = NULL;
		rcu_read_unlock();
		if (!dentry)
			break;

		emit_dentry = false;
		di = ceph_dentry(dentry);
		spin_lock(&dentry->d_lock);
		if (di->lease_shared_gen == shared_gen &&
		    d_really_is_positive(dentry) &&
		    ceph_snap(d_inode(dentry)) != CEPH_SNAPDIR &&
		    ceph_ino(d_inode(dentry)) != CEPH_INO_CEPH &&
		    fpos_cmp(ctx->pos, di->offset) <= 0) {
			emit_dentry = true;
		}
		spin_unlock(&dentry->d_lock);

		if (emit_dentry) {
			dout(" %llu (%llu) dentry %p %pd %p\n", di->offset,
			     ctx->pos, dentry, dentry, d_inode(dentry));
			ctx->pos = di->offset;
			if (!dir_emit(ctx, dentry->d_name.name,
				      dentry->d_name.len,
				      ceph_translate_ino(dentry->d_sb,
							 d_inode(dentry)->i_ino),
				      d_inode(dentry)->i_mode >> 12)) {
				dput(dentry);
				err = 0;
				break;
			}
			ctx->pos++;

			if (last)
				dput(last);
			last = dentry;
		} else {
			dput(dentry);
		}

		cache_ctl.index++;
		ptr_pos += sizeof(struct dentry *);
	}
	ceph_readdir_cache_release(&cache_ctl);
	if (last) {
		int ret;
		di = ceph_dentry(last);
		ret = note_last_dentry(fi, last->d_name.name, last->d_name.len,
				       fpos_off(di->offset) + 1);
		if (ret < 0)
			err = ret;
		dput(last);
	}
	return err;
}

static int ceph_readdir(struct file *file, struct dir_context *ctx)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	unsigned frag = fpos_frag(ctx->pos);
	int off = fpos_off(ctx->pos);
	int err;
	u32 ftype;
	struct ceph_mds_reply_info_parsed *rinfo;

	dout("readdir %p file %p frag %u off %u\n", inode, file, frag, off);
	if (fi->flags & CEPH_F_ATEND)
		return 0;

	/* always start with . and .. */
	if (ctx->pos == 0) {
		dout("readdir off 0 -> '.'\n");
		if (!dir_emit(ctx, ".", 1,
			      ceph_translate_ino(inode->i_sb, inode->i_ino),
			      inode->i_mode >> 12))
			return 0;
		ctx->pos = 1;
		off = 1;
	}
	if (ctx->pos == 1) {
		ino_t ino = parent_ino(file->f_path.dentry);
		dout("readdir off 1 -> '..'\n");
		if (!dir_emit(ctx, "..", 2,
			      ceph_translate_ino(inode->i_sb, ino),
			      inode->i_mode >> 12))
			return 0;
		ctx->pos = 2;
		off = 2;
	}

	/* can we use the dcache? */
	spin_lock(&ci->i_ceph_lock);
	if (ceph_test_mount_opt(fsc, DCACHE) &&
	    !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
	    ceph_snap(inode) != CEPH_SNAPDIR &&
	    __ceph_dir_is_complete_ordered(ci) &&
	    __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
		u32 shared_gen = ci->i_shared_gen;
		spin_unlock(&ci->i_ceph_lock);
		err = __dcache_readdir(file, ctx, shared_gen);
		if (err != -EAGAIN)
			return err;
		frag = fpos_frag(ctx->pos);
		off = fpos_off(ctx->pos);
	} else {
		spin_unlock(&ci->i_ceph_lock);
	}

	/* proceed with a normal readdir */
more:
	/* do we have the correct frag content buffered? */
	if (fi->frag != frag || fi->last_readdir == NULL) {
		struct ceph_mds_request *req;
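		/* listing the .snap pseudo-directory asks the MDS for the
		 * snapshot list (LSSNAP) rather than ordinary entries */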
		int op = ceph_snap(inode) == CEPH_SNAPDIR ?
			CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

		/* discard old result, if any */
		if (fi->last_readdir) {
			ceph_mdsc_put_request(fi->last_readdir);
			fi->last_readdir = NULL;
		}

		dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
		     ceph_vinop(inode), frag, fi->last_name);
		req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
		if (IS_ERR(req))
			return PTR_ERR(req);
		err = ceph_alloc_readdir_reply_buffer(req, inode);
		if (err) {
			ceph_mdsc_put_request(req);
			return err;
		}
		/* hints to request -> mds selection code */
		req->r_direct_mode = USE_AUTH_MDS;
		req->r_direct_hash = ceph_frag_value(frag);
		req->r_direct_is_hash = true;
		if (fi->last_name) {
			req->r_path2 = kstrdup(fi->last_name, GFP_KERNEL);
			if (!req->r_path2) {
				ceph_mdsc_put_request(req);
				return -ENOMEM;
			}
		}
		req->r_dir_release_cnt = fi->dir_release_count;
		req->r_dir_ordered_cnt = fi->dir_ordered_count;
		req->r_readdir_cache_idx = fi->readdir_cache_idx;
		req->r_readdir_offset = fi->next_offset;
		req->r_args.readdir.frag = cpu_to_le32(frag);

		req->r_inode = inode;
		ihold(inode);
		req->r_dentry = dget(file->f_path.dentry);
		err = ceph_mdsc_do_request(mdsc, NULL, req);
		if (err < 0) {
			ceph_mdsc_put_request(req);
			return err;
		}
		dout("readdir got and parsed readdir result=%d"
		     " on frag %x, end=%d, complete=%d\n", err, frag,
		     (int)req->r_reply_info.dir_end,
		     (int)req->r_reply_info.dir_complete);


		/* note next offset and last dentry name */
		rinfo = &req->r_reply_info;
		if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
			frag = le32_to_cpu(rinfo->dir_dir->frag);
			off = req->r_readdir_offset;
			fi->next_offset = off;
		}

		fi->frag = frag;
		fi->offset = fi->next_offset;
		fi->last_readdir = req;

		if (req->r_did_prepopulate) {
			fi->readdir_cache_idx = req->r_readdir_cache_idx;
			if (fi->readdir_cache_idx < 0) {
				/* preclude from marking dir ordered */
				fi->dir_ordered_count = 0;
			} else if (ceph_frag_is_leftmost(frag) && off == 2) {
				/* note dir version at start of readdir so
				 * we can tell if any dentries get dropped */
				fi->dir_release_count = req->r_dir_release_cnt;
				fi->dir_ordered_count = req->r_dir_ordered_cnt;
			}
		} else {
			dout("readdir !did_prepopulate\n");
			/* disable readdir cache */
			fi->readdir_cache_idx = -1;
			/* preclude from marking dir complete */
			fi->dir_release_count = 0;
		}

		if (req->r_reply_info.dir_end) {
			kfree(fi->last_name);
			fi->last_name = NULL;
			if (ceph_frag_is_rightmost(frag))
				fi->next_offset = 2;
			else
				fi->next_offset = 0;
		} else {
			err = note_last_dentry(fi,
				       rinfo->dir_dname[rinfo->dir_nr-1],
				       rinfo->dir_dname_len[rinfo->dir_nr-1],
				       fi->next_offset + rinfo->dir_nr);
			if (err)
				return err;
		}
	}

	rinfo = &fi->last_readdir->r_reply_info;
	dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
	     rinfo->dir_nr, off, fi->offset);

	ctx->pos = ceph_make_fpos(frag, off);
	while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
		struct ceph_mds_reply_inode *in =
			rinfo->dir_in[off - fi->offset].in;
		struct ceph_vino vino;
		ino_t ino;

		dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
		     off, off - fi->offset, rinfo->dir_nr, ctx->pos,
		     rinfo->dir_dname_len[off - fi->offset],
		     rinfo->dir_dname[off - fi->offset], in);
		BUG_ON(!in);
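		/* the type dir_emit() wants is the DT_* value, which by
		 * convention is the S_IFMT part of the mode shifted down
		 * by 12 bits */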
		ftype = le32_to_cpu(in->mode) >> 12;
		vino.ino = le64_to_cpu(in->ino);
		vino.snap = le64_to_cpu(in->snapid);
		ino = ceph_vino_to_ino(vino);
		if (!dir_emit(ctx,
			      rinfo->dir_dname[off - fi->offset],
			      rinfo->dir_dname_len[off - fi->offset],
			      ceph_translate_ino(inode->i_sb, ino), ftype)) {
			dout("filldir stopping us...\n");
			return 0;
		}
		off++;
		ctx->pos++;
	}

	if (fi->last_name) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
		goto more;
	}

	/* more frags? */
	if (!ceph_frag_is_rightmost(frag)) {
		frag = ceph_frag_next(frag);
		off = 0;
		ctx->pos = ceph_make_fpos(frag, off);
		dout("readdir next frag is %x\n", frag);
		goto more;
	}
	fi->flags |= CEPH_F_ATEND;

	/*
	 * if dir_release_count still matches the dir, no dentries
	 * were released during the whole readdir, and we should have
	 * the complete dir contents in our cache.
	 */
	if (atomic64_read(&ci->i_release_count) == fi->dir_release_count) {
		spin_lock(&ci->i_ceph_lock);
		if (fi->dir_ordered_count == atomic64_read(&ci->i_ordered_count)) {
			dout(" marking %p complete and ordered\n", inode);
			/* use i_size to track number of entries in
			 * readdir cache */
			BUG_ON(fi->readdir_cache_idx < 0);
			i_size_write(inode, fi->readdir_cache_idx *
				     sizeof(struct dentry*));
		} else {
			dout(" marking %p complete\n", inode);
		}
		__ceph_dir_set_complete(ci, fi->dir_release_count,
					fi->dir_ordered_count);
		spin_unlock(&ci->i_ceph_lock);
	}

	dout("readdir %p file %p done.\n", inode, file);
	return 0;
}

static void reset_readdir(struct ceph_file_info *fi, unsigned frag)
{
	if (fi->last_readdir) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
	}
	kfree(fi->last_name);
	fi->last_name = NULL;
	fi->dir_release_count = 0;
	fi->readdir_cache_idx = -1;
	if (ceph_frag_is_leftmost(frag))
		fi->next_offset = 2;  /* compensate for . and .. */
	else
		fi->next_offset = 0;
	fi->flags &= ~CEPH_F_ATEND;
}

static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_mapping->host;
	loff_t old_offset = ceph_make_fpos(fi->frag, fi->next_offset);
	loff_t retval;

	mutex_lock(&inode->i_mutex);
	retval = -EINVAL;
	switch (whence) {
	case SEEK_CUR:
		offset += file->f_pos;
		/* fall through */
	case SEEK_SET:
		break;
	case SEEK_END:
		retval = -EOPNOTSUPP;
		/* fall through */
	default:
		goto out;
	}

	if (offset >= 0) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
			fi->flags &= ~CEPH_F_ATEND;
		}
		retval = offset;

		if (offset == 0 ||
		    fpos_frag(offset) != fi->frag ||
		    fpos_off(offset) < fi->offset) {
			/* discard buffered readdir content on seekdir(0), or
			 * seek to new frag, or seek prior to current chunk */
			dout("dir_llseek dropping %p content\n", file);
			reset_readdir(fi, fpos_frag(offset));
		} else if (fpos_cmp(offset, old_offset) > 0) {
			/* reset dir_release_count if we did a forward seek */
			fi->dir_release_count = 0;
			fi->readdir_cache_idx = -1;
		}
	}
out:
	mutex_unlock(&inode->i_mutex);
	return retval;
}
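
/*
 * Note that SEEK_END is rejected above: because f_pos is a (frag, offset)
 * pair, a directory has no single meaningful end position to seek to.
 */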

/*
 * Handle lookups for the hidden .snap directory.
 */
int ceph_handle_snapdir(struct ceph_mds_request *req,
			struct dentry *dentry, int err)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct inode *parent = d_inode(dentry->d_parent); /* we hold i_mutex */

	/* .snap dir? */
	if (err == -ENOENT &&
	    ceph_snap(parent) == CEPH_NOSNAP &&
	    strcmp(dentry->d_name.name,
		   fsc->mount_options->snapdir_name) == 0) {
		struct inode *inode = ceph_get_snapdir(parent);
		dout("ENOENT on snapdir %p '%pd', linking to snapdir %p\n",
		     dentry, dentry, inode);
		BUG_ON(!d_unhashed(dentry));
		d_add(dentry, inode);
		err = 0;
	}
	return err;
}

/*
 * Figure out final result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
				  struct dentry *dentry, int err)
{
	if (err == -ENOENT) {
		/* no trace? */
		err = 0;
		if (!req->r_reply_info.head->is_dentry) {
			dout("ENOENT and no trace, dentry %p inode %p\n",
			     dentry, d_inode(dentry));
			if (d_really_is_positive(dentry)) {
				d_drop(dentry);
				err = -ENOENT;
			} else {
				d_add(dentry, NULL);
			}
		}
	}
	if (err)
		dentry = ERR_PTR(err);
	else if (dentry != req->r_dentry)
		dentry = dget(req->r_dentry);   /* we got spliced */
	else
		dentry = NULL;
	return dentry;
}

static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
	return ceph_ino(inode) == CEPH_INO_ROOT &&
		strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}

/*
 * Look up a single dir entry.  If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
				  unsigned int flags)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op;
	int err;

	dout("lookup %p dentry %p '%pd'\n",
	     dir, dentry, dentry);

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	err = ceph_init_dentry(dentry);
	if (err < 0)
		return ERR_PTR(err);

	/* can we conclude ENOENT locally? */
	if (d_really_is_negative(dentry)) {
		struct ceph_inode_info *ci = ceph_inode(dir);
		struct ceph_dentry_info *di = ceph_dentry(dentry);

		spin_lock(&ci->i_ceph_lock);
		dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
		if (strncmp(dentry->d_name.name,
			    fsc->mount_options->snapdir_name,
			    dentry->d_name.len) &&
		    !is_root_ceph_dentry(dir, dentry) &&
		    ceph_test_mount_opt(fsc, DCACHE) &&
		    __ceph_dir_is_complete(ci) &&
		    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
			spin_unlock(&ci->i_ceph_lock);
			dout(" dir %p complete, -ENOENT\n", dir);
			d_add(dentry, NULL);
			di->lease_shared_gen = ci->i_shared_gen;
			return NULL;
		}
		spin_unlock(&ci->i_ceph_lock);
	}
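
	/* names under .snap are snapshot names, which the MDS resolves
	 * with LOOKUPSNAP rather than an ordinary LOOKUP */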
	op = ceph_snap(dir) == CEPH_SNAPDIR ?
		CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
	req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	/* we only need inode linkage */
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_locked_dir = dir;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	err = ceph_handle_snapdir(req, dentry, err);
	dentry = ceph_finish_lookup(req, dentry, err);
	ceph_mdsc_put_request(req);  /* will dput(dentry) */
	dout("lookup result=%p\n", dentry);
	return dentry;
}

/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
	struct dentry *result = ceph_lookup(dir, dentry, 0);

	if (result && !IS_ERR(result)) {
		/*
		 * We created the item, then did a lookup, and found
		 * it was already linked to another inode we already
		 * had in our cache (and thus got spliced).  To not
		 * confuse VFS (especially when inode is a directory),
		 * we don't link our dentry to that inode, and return
		 * an error instead.
		 *
		 * This event should be rare: it happens only when we
		 * talk to an old MDS.  Recent MDS versions do not send
		 * a traceless reply for a request that creates a new
		 * inode.
		 */
		d_drop(result);
		return -ESTALE;
	}
	return PTR_ERR(result);
}
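
/*
 * A note on the r_dentry_drop/r_dentry_unless pairs used by the creation
 * ops below: they bundle a cap release into the request, dropping our
 * lease on the dentry and the parent dir's CEPH_CAP_FILE_SHARED cap
 * unless we also hold CEPH_CAP_FILE_EXCL.  The MDS would revoke those
 * anyway when it modifies the directory, so releasing them up front
 * saves a revocation round trip.
 */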

static int ceph_mknod(struct inode *dir, struct dentry *dentry,
		      umode_t mode, dev_t rdev)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_acls_info acls = {};
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	err = ceph_pre_init_acls(dir, &mode, &acls);
	if (err < 0)
		return err;

	dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
	     dir, dentry, mode, rdev);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mknod.mode = cpu_to_le32(mode);
	req->r_args.mknod.rdev = cpu_to_le32(rdev);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	if (acls.pagelist) {
		req->r_pagelist = acls.pagelist;
		acls.pagelist = NULL;
	}
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (!err)
		ceph_init_inode_acls(d_inode(dentry), &acls);
	else
		d_drop(dentry);
	ceph_release_acls_info(&acls);
	return err;
}

static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		       bool excl)
{
	return ceph_mknod(dir, dentry, mode, 0);
}

static int ceph_symlink(struct inode *dir, struct dentry *dentry,
			const char *dest)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_path2 = kstrdup(dest, GFP_KERNEL);
	if (!req->r_path2) {
		err = -ENOMEM;
		ceph_mdsc_put_request(req);
		goto out;
	}
	req->r_locked_dir = dir;
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (err)
		d_drop(dentry);
	return err;
}

static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_acls_info acls = {};
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* mkdir .snap/foo is a MKSNAP */
		op = CEPH_MDS_OP_MKSNAP;
		dout("mksnap dir %p snap '%pd' dn %p\n", dir,
		     dentry, dentry);
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
		op = CEPH_MDS_OP_MKDIR;
	} else {
		goto out;
	}

	mode |= S_IFDIR;
	err = ceph_pre_init_acls(dir, &mode, &acls);
	if (err < 0)
		goto out;

	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mkdir.mode = cpu_to_le32(mode);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	if (acls.pagelist) {
		req->r_pagelist = acls.pagelist;
		acls.pagelist = NULL;
	}
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err &&
	    !req->r_reply_info.head->is_target &&
	    !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (!err)
		ceph_init_inode_acls(d_inode(dentry), &acls);
	else
		d_drop(dentry);
	ceph_release_acls_info(&acls);
	return err;
}

static int ceph_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("link in dir %p old_dentry %p dentry %p\n", dir,
	     old_dentry, dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_SHARED on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (err) {
		d_drop(dentry);
	} else if (!req->r_reply_info.head->is_dentry) {
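		/* no trace came back, so fill_trace could not instantiate
		 * the new link; hash it to the existing inode ourselves */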
		ihold(d_inode(old_dentry));
		d_instantiate(dentry, d_inode(old_dentry));
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps.  If it
 * looks like the link count will hit 0, drop any other caps (other
 * than PIN) we don't specifically want (due to the file still being
 * open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

	spin_lock(&ci->i_ceph_lock);
	if (inode->i_nlink == 1) {
		drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
		ci->i_ceph_flags |= CEPH_I_NODELAY;
	}
	spin_unlock(&ci->i_ceph_lock);
	return drop;
}

/*
 * rmdir and unlink differ only by the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = d_inode(dentry);
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* rmdir .snap/foo is RMSNAP */
		dout("rmsnap dir %p '%pd' dn %p\n", dir, dentry, dentry);
		op = CEPH_MDS_OP_RMSNAP;
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("unlink/rmdir dir %p dn %p inode %p\n",
		     dir, dentry, inode);
		op = d_is_dir(dentry) ?
			CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
	} else
		goto out;
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_inode_drop = drop_caps_for_unlink(inode);
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		d_delete(dentry);
	ceph_mdsc_put_request(req);
out:
	return err;
}
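
/*
 * A rename may not cross snap contexts (-EXDEV), and within a snapshot
 * context only the .snap directory itself is writable: renaming there
 * renames a snapshot (RENAMESNAP).
 */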
static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op = CEPH_MDS_OP_RENAME;
	int err;

	if (ceph_snap(old_dir) != ceph_snap(new_dir))
		return -EXDEV;
	if (ceph_snap(old_dir) != CEPH_NOSNAP) {
		if (old_dir == new_dir && ceph_snap(old_dir) == CEPH_SNAPDIR)
			op = CEPH_MDS_OP_RENAMESNAP;
		else
			return -EROFS;
	}
	dout("rename dir %p dentry %p to dir %p dentry %p\n",
	     old_dir, old_dentry, new_dir, new_dentry);
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	ihold(old_dir);
	req->r_dentry = dget(new_dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_old_dentry_dir = old_dir;
	req->r_locked_dir = new_dir;
	req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_RDCACHE on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	if (d_really_is_positive(new_dentry))
		req->r_inode_drop = drop_caps_for_unlink(d_inode(new_dentry));
	err = ceph_mdsc_do_request(mdsc, old_dir, req);
	if (!err && !req->r_reply_info.head->is_dentry) {
		/*
		 * Normally d_move() is done by fill_trace (called by
		 * do_request, above).  If there is no trace, we need
		 * to do it here.
		 */

		/* d_move screws up sibling dentries' offsets */
		ceph_dir_clear_complete(old_dir);
		ceph_dir_clear_complete(new_dir);

		d_move(old_dentry, new_dentry);

		/* ensure target dentry is invalidated, despite
		   rehashing bug in vfs_rename_dir */
		ceph_invalidate_dentry_lease(new_dentry);
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_time = jiffies;
	ceph_dentry(dentry)->lease_shared_gen = 0;
	spin_unlock(&dentry->d_lock);
}

/*
 * Check if dentry lease is valid.  If not, delete the lease.  Try to
 * renew if the lease is more than half up.
 */
static int dentry_lease_is_valid(struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *s;
	int valid = 0;
	u32 gen;
	unsigned long ttl;
	struct ceph_mds_session *session = NULL;
	struct inode *dir = NULL;
	u32 seq = 0;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (di->lease_session) {
		s = di->lease_session;
		spin_lock(&s->s_gen_ttl_lock);
		gen = s->s_cap_gen;
		ttl = s->s_cap_ttl;
		spin_unlock(&s->s_gen_ttl_lock);

		if (di->lease_gen == gen &&
		    time_before(jiffies, dentry->d_time) &&
		    time_before(jiffies, ttl)) {
			valid = 1;
			if (di->lease_renew_after &&
			    time_after(jiffies, di->lease_renew_after)) {
				/* we should renew */
				dir = d_inode(dentry->d_parent);
				session = ceph_get_mds_session(s);
				seq = di->lease_seq;
				di->lease_renew_after = 0;
				di->lease_renew_from = jiffies;
			}
		}
	}
	spin_unlock(&dentry->d_lock);

	if (session) {
		ceph_mdsc_lease_send_msg(session, dir, dentry,
					 CEPH_MDS_LEASE_RENEW, seq);
		ceph_put_mds_session(session);
	}
	dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
	return valid;
}

/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int valid = 0;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_shared_gen == di->lease_shared_gen)
		valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
	spin_unlock(&ci->i_ceph_lock);
	dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
	     dir, (unsigned)ci->i_shared_gen, dentry,
	     (unsigned)di->lease_shared_gen, valid);
	return valid;
}
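
/*
 * In short, a cached dentry can be trusted for one of two reasons: a
 * per-dentry lease granted by the MDS (checked against the session's
 * cap gen/ttl above), or the parent directory's CEPH_CAP_FILE_SHARED
 * cap, recorded in lease_shared_gen when the dentry was filled in.
 */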

/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	int valid = 0;
	struct inode *dir;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dout("d_revalidate %p '%pd' inode %p offset %lld\n", dentry,
	     dentry, d_inode(dentry), ceph_dentry(dentry)->offset);

	dir = ceph_get_dentry_parent_inode(dentry);

	/* always trust cached snapped dentries, snapdir dentry */
	if (ceph_snap(dir) != CEPH_NOSNAP) {
		dout("d_revalidate %p '%pd' inode %p is SNAPPED\n", dentry,
		     dentry, d_inode(dentry));
		valid = 1;
	} else if (d_really_is_positive(dentry) &&
		   ceph_snap(d_inode(dentry)) == CEPH_SNAPDIR) {
		valid = 1;
	} else if (dentry_lease_is_valid(dentry) ||
		   dir_lease_is_valid(dir, dentry)) {
		if (d_really_is_positive(dentry))
			valid = ceph_is_any_caps(d_inode(dentry));
		else
			valid = 1;
	}

	dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
	if (valid) {
		ceph_dentry_lru_touch(dentry);
	} else {
		ceph_dir_clear_complete(dir);
	}
	iput(dir);
	return valid;
}

/*
 * Release our ceph_dentry_info.
 */
static void ceph_d_release(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	dout("d_release %p\n", dentry);
	ceph_dentry_lru_del(dentry);
	if (di->lease_session)
		ceph_put_mds_session(di->lease_session);
	kmem_cache_free(ceph_dentry_cachep, di);
	dentry->d_fsdata = NULL;
}

static int ceph_snapdir_d_revalidate(struct dentry *dentry,
				     unsigned int flags)
{
	/*
	 * Eventually, we'll want to revalidate snapped metadata
	 * too... probably...
	 */
	return 1;
}

/*
 * When the VFS prunes a dentry from the cache, we need to clear the
 * complete flag on the parent directory.
 *
 * Called under dentry->d_lock.
 */
static void ceph_d_prune(struct dentry *dentry)
{
	dout("ceph_d_prune %p\n", dentry);

	/* do we have a valid parent? */
	if (IS_ROOT(dentry))
		return;

	/* if we are not hashed, we don't affect dir's completeness */
	if (d_unhashed(dentry))
		return;

	/*
	 * we hold d_lock, so d_parent is stable, and d_fsdata is never
	 * cleared until d_release
	 */
	ceph_dir_clear_complete(d_inode(dentry->d_parent));
}
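
/*
 * For example (an illustrative sketch): on a mount with "-o dirstat",
 * "cat somedir" returns the entries/files/subdirs counters and the
 * recursive rentries/rfiles/rsubdirs/rbytes/rctime stats formatted by
 * ceph_read_dir() below.
 */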

/*
 * read() on a dir.  This weird interface hack only works if mounted
 * with '-o dirstat'.
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
			     loff_t *ppos)
{
	struct ceph_file_info *cf = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	int left;
	const int bufsize = 1024;

	if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
		return -EISDIR;

	if (!cf->dir_info) {
		cf->dir_info = kmalloc(bufsize, GFP_KERNEL);
		if (!cf->dir_info)
			return -ENOMEM;
		cf->dir_info_len =
			snprintf(cf->dir_info, bufsize,
				 "entries:   %20lld\n"
				 " files:    %20lld\n"
				 " subdirs:  %20lld\n"
				 "rentries:  %20lld\n"
				 " rfiles:   %20lld\n"
				 " rsubdirs: %20lld\n"
				 "rbytes:    %20lld\n"
				 "rctime:    %10ld.%09ld\n",
				 ci->i_files + ci->i_subdirs,
				 ci->i_files,
				 ci->i_subdirs,
				 ci->i_rfiles + ci->i_rsubdirs,
				 ci->i_rfiles,
				 ci->i_rsubdirs,
				 ci->i_rbytes,
				 (long)ci->i_rctime.tv_sec,
				 (long)ci->i_rctime.tv_nsec);
	}

	if (*ppos >= cf->dir_info_len)
		return 0;
	size = min_t(unsigned, size, cf->dir_info_len-*ppos);
	left = copy_to_user(buf, cf->dir_info + *ppos, size);
	if (left == size)
		return -EFAULT;
	*ppos += (size - left);
	return size - left;
}

/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_add %p %p '%pd'\n", di, dn, dn);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_add_tail(&di->lru, &mdsc->dentry_lru);
	mdsc->num_dentry++;
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_touch(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_touch %p %p '%pd' (offset %lld)\n", di, dn, dn,
	     di->offset);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_move_tail(&di->lru, &mdsc->dentry_lru);
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_del(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_del %p %p '%pd'\n", di, dn, dn);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_del_init(&di->lru);
	mdsc->num_dentry--;
	spin_unlock(&mdsc->dentry_lru_lock);
}

/*
 * Return name hash for a given dentry.  This is dependent on
 * the parent directory's hash function.
 */
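/*
 * (The MDS client hashes a name with the directory's configured hash
 * function when deciding which dirfrag, and therefore which MDS, is
 * authoritative for it, so we must match the server's choice.)
 */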
unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
{
	struct ceph_inode_info *dci = ceph_inode(dir);

	switch (dci->i_dir_layout.dl_dir_hash) {
	case 0:	/* for backward compat */
	case CEPH_STR_HASH_LINUX:
		return dn->d_name.hash;

	default:
		return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
				     dn->d_name.name, dn->d_name.len);
	}
}

const struct file_operations ceph_dir_fops = {
	.read = ceph_read_dir,
	.iterate = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
	.unlocked_ioctl = ceph_ioctl,
	.fsync = ceph_fsync,
};

const struct file_operations ceph_snapdir_fops = {
	.iterate = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
};

const struct inode_operations ceph_dir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.setattr = ceph_setattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
	.get_acl = ceph_get_acl,
	.set_acl = ceph_set_acl,
	.mknod = ceph_mknod,
	.symlink = ceph_symlink,
	.mkdir = ceph_mkdir,
	.link = ceph_link,
	.unlink = ceph_unlink,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
	.create = ceph_create,
	.atomic_open = ceph_atomic_open,
};

const struct inode_operations ceph_snapdir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.mkdir = ceph_mkdir,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
};

const struct dentry_operations ceph_dentry_ops = {
	.d_revalidate = ceph_d_revalidate,
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
};

const struct dentry_operations ceph_snapdir_dentry_ops = {
	.d_revalidate = ceph_snapdir_d_revalidate,
	.d_release = ceph_d_release,
};

const struct dentry_operations ceph_snap_dentry_ops = {
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
};