#include <linux/ceph/ceph_debug.h>

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "super.h"
#include "mds_client.h"

/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path.  Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino).
 * The exception to this is open_root_dentry(), which will open the
 * mount point by name.
 */

const struct inode_operations ceph_dir_iops;
const struct file_operations ceph_dir_fops;
const struct dentry_operations ceph_dentry_ops;

/*
 * Initialize ceph dentry state.
 */
int ceph_init_dentry(struct dentry *dentry)
{
        struct ceph_dentry_info *di;

        if (dentry->d_fsdata)
                return 0;

        if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
                dentry->d_op = &ceph_dentry_ops;
        else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
                dentry->d_op = &ceph_snapdir_dentry_ops;
        else
                dentry->d_op = &ceph_snap_dentry_ops;

        di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
        if (!di)
                return -ENOMEM;          /* oh well */

        spin_lock(&dentry->d_lock);
        if (dentry->d_fsdata) {
                /* lost a race */
                kmem_cache_free(ceph_dentry_cachep, di);
                goto out_unlock;
        }
        di->dentry = dentry;
        di->lease_session = NULL;
        dentry->d_fsdata = di;
        dentry->d_time = jiffies;
        ceph_dentry_lru_add(dentry);
out_unlock:
        spin_unlock(&dentry->d_lock);
        return 0;
}

/*
 * for readdir, we encode the directory frag and offset within that
 * frag into f_pos.
 */
static unsigned fpos_frag(loff_t p)
{
        return p >> 32;
}
static unsigned fpos_off(loff_t p)
{
        return p & 0xffffffff;
}

/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache.  We make this work by carefully ordering dentries on
 * d_u.d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * I_COMPLETE indicates that we have all dentries in the dir.  It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
 */
static int __dcache_readdir(struct file *filp,
                            void *dirent, filldir_t filldir)
{
        struct ceph_file_info *fi = filp->private_data;
        struct dentry *parent = filp->f_dentry;
        struct inode *dir = parent->d_inode;
        struct list_head *p;
        struct dentry *dentry, *last;
        struct ceph_dentry_info *di;
        int err = 0;

        /* claim ref on last dentry we returned */
        last = fi->dentry;
        fi->dentry = NULL;

        dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos,
             last);

        spin_lock(&dcache_lock);
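        /*
         * Offsets 0 and 1 are reserved for "." and ".." (see
         * ceph_readdir() below), so the first real entry lives at
         * offset 2; f_pos == 2 therefore means a fresh pass.
         */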
        /* start at beginning? */
        if (filp->f_pos == 2 || (last &&
                                 filp->f_pos < ceph_dentry(last)->offset)) {
                if (list_empty(&parent->d_subdirs))
                        goto out_unlock;
                p = parent->d_subdirs.prev;
                dout(" initial p %p/%p\n", p->prev, p->next);
        } else {
                p = last->d_u.d_child.prev;
        }

more:
        dentry = list_entry(p, struct dentry, d_u.d_child);
        di = ceph_dentry(dentry);
        while (1) {
                dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
                     d_unhashed(dentry) ? "!hashed" : "hashed",
                     parent->d_subdirs.prev, parent->d_subdirs.next);
                if (p == &parent->d_subdirs) {
                        fi->at_end = 1;
                        goto out_unlock;
                }
                if (!d_unhashed(dentry) && dentry->d_inode &&
                    ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
                    ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
                    filp->f_pos <= di->offset)
                        break;
                dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
                     dentry->d_name.len, dentry->d_name.name, di->offset,
                     filp->f_pos, d_unhashed(dentry) ? " unhashed" : "",
                     !dentry->d_inode ? " null" : "");
                p = p->prev;
                dentry = list_entry(p, struct dentry, d_u.d_child);
                di = ceph_dentry(dentry);
        }

        atomic_inc(&dentry->d_count);
        spin_unlock(&dcache_lock);

        dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos,
             dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
        filp->f_pos = di->offset;
        err = filldir(dirent, dentry->d_name.name,
                      dentry->d_name.len, di->offset,
                      dentry->d_inode->i_ino,
                      dentry->d_inode->i_mode >> 12);

        if (last) {
                if (err < 0) {
                        /* remember our position */
                        fi->dentry = last;
                        fi->next_offset = di->offset;
                } else {
                        dput(last);
                }
        }
        last = dentry;

        if (err < 0)
                goto out;

        filp->f_pos++;

        /* make sure a dentry wasn't dropped while we didn't have dcache_lock */
        if (!ceph_i_test(dir, CEPH_I_COMPLETE)) {
                dout(" lost I_COMPLETE on %p; falling back to mds\n", dir);
                err = -EAGAIN;
                goto out;
        }

        spin_lock(&dcache_lock);
        p = p->prev;    /* advance to next dentry */
        goto more;

out_unlock:
        spin_unlock(&dcache_lock);
out:
        if (last)
                dput(last);
        return err;
}

/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
                            int len)
{
        kfree(fi->last_name);
        fi->last_name = kmalloc(len+1, GFP_NOFS);
        if (!fi->last_name)
                return -ENOMEM;
        memcpy(fi->last_name, name, len);
        fi->last_name[len] = 0;
        dout("note_last_dentry '%s'\n", fi->last_name);
        return 0;
}

static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
        struct ceph_file_info *fi = filp->private_data;
        struct inode *inode = filp->f_dentry->d_inode;
        struct ceph_inode_info *ci = ceph_inode(inode);
        struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        unsigned frag = fpos_frag(filp->f_pos);
        int off = fpos_off(filp->f_pos);
        int err;
        u32 ftype;
        struct ceph_mds_reply_info_parsed *rinfo;
        const int max_entries = fsc->mount_options->max_readdir;
        const int max_bytes = fsc->mount_options->max_readdir_bytes;

        dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off);
        if (fi->at_end)
                return 0;
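        /*
         * For illustration, assuming ceph_make_fpos(frag, off) (see
         * super.h) packs the frag into the high 32 bits and the
         * in-frag offset into the low 32 bits: frag 0xdead at offset 3
         * encodes as f_pos 0x0000dead00000003, and fpos_frag()/
         * fpos_off() above recover the two halves.
         */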
        /* always start with . and .. */
        if (filp->f_pos == 0) {
                /* note dir version at start of readdir so we can tell
                 * if any dentries get dropped */
                fi->dir_release_count = ci->i_release_count;

                dout("readdir off 0 -> '.'\n");
                if (filldir(dirent, ".", 1, ceph_make_fpos(0, 0),
                            inode->i_ino, inode->i_mode >> 12) < 0)
                        return 0;
                filp->f_pos = 1;
                off = 1;
        }
        if (filp->f_pos == 1) {
                dout("readdir off 1 -> '..'\n");
                if (filldir(dirent, "..", 2, ceph_make_fpos(0, 1),
                            filp->f_dentry->d_parent->d_inode->i_ino,
                            inode->i_mode >> 12) < 0)
                        return 0;
                filp->f_pos = 2;
                off = 2;
        }

        /* can we use the dcache? */
        spin_lock(&inode->i_lock);
        if ((filp->f_pos == 2 || fi->dentry) &&
            !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
            ceph_snap(inode) != CEPH_SNAPDIR &&
            (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
            __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
                spin_unlock(&inode->i_lock);
                err = __dcache_readdir(filp, dirent, filldir);
                if (err != -EAGAIN)
                        return err;
        } else {
                spin_unlock(&inode->i_lock);
        }
        if (fi->dentry) {
                err = note_last_dentry(fi, fi->dentry->d_name.name,
                                       fi->dentry->d_name.len);
                if (err)
                        return err;
                dput(fi->dentry);
                fi->dentry = NULL;
        }

        /* proceed with a normal readdir */

more:
        /* do we have the correct frag content buffered? */
        if (fi->frag != frag || fi->last_readdir == NULL) {
                struct ceph_mds_request *req;
                int op = ceph_snap(inode) == CEPH_SNAPDIR ?
                        CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

                /* discard old result, if any */
                if (fi->last_readdir) {
                        ceph_mdsc_put_request(fi->last_readdir);
                        fi->last_readdir = NULL;
                }

                /* requery frag tree, as the frag topology may have changed */
                frag = ceph_choose_frag(ceph_inode(inode), frag, NULL, NULL);

                dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
                     ceph_vinop(inode), frag, fi->last_name);
                req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
                if (IS_ERR(req))
                        return PTR_ERR(req);
                req->r_inode = igrab(inode);
                req->r_dentry = dget(filp->f_dentry);
                /* hints to request -> mds selection code */
                req->r_direct_mode = USE_AUTH_MDS;
                req->r_direct_hash = ceph_frag_value(frag);
                req->r_direct_is_hash = true;
                req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
                req->r_readdir_offset = fi->next_offset;
                req->r_args.readdir.frag = cpu_to_le32(frag);
                req->r_args.readdir.max_entries = cpu_to_le32(max_entries);
                req->r_args.readdir.max_bytes = cpu_to_le32(max_bytes);
                req->r_num_caps = max_entries + 1;
                err = ceph_mdsc_do_request(mdsc, NULL, req);
                if (err < 0) {
                        ceph_mdsc_put_request(req);
                        return err;
                }
                dout("readdir got and parsed readdir result=%d"
                     " on frag %x, end=%d, complete=%d\n", err, frag,
                     (int)req->r_reply_info.dir_end,
                     (int)req->r_reply_info.dir_complete);

                if (!req->r_did_prepopulate) {
                        dout("readdir !did_prepopulate\n");
                        fi->dir_release_count--;    /* preclude I_COMPLETE */
                }

                /* note next offset and last dentry name */
                fi->offset = fi->next_offset;
                fi->last_readdir = req;
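                /*
                 * If the MDS says this frag is complete, forget
                 * last_name and reset next_offset for the next frag;
                 * the rightmost frag restarts at 2 because offsets 0
                 * and 1 belong to "." and "..".  Otherwise remember
                 * where this chunk ended so the next request resumes
                 * after it.
                 */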
                if (req->r_reply_info.dir_end) {
                        kfree(fi->last_name);
                        fi->last_name = NULL;
                        if (ceph_frag_is_rightmost(frag))
                                fi->next_offset = 2;
                        else
                                fi->next_offset = 0;
                } else {
                        rinfo = &req->r_reply_info;
                        err = note_last_dentry(fi,
                                       rinfo->dir_dname[rinfo->dir_nr-1],
                                       rinfo->dir_dname_len[rinfo->dir_nr-1]);
                        if (err)
                                return err;
                        fi->next_offset += rinfo->dir_nr;
                }
        }

        rinfo = &fi->last_readdir->r_reply_info;
        dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
             rinfo->dir_nr, off, fi->offset);
        while (off - fi->offset >= 0 && off - fi->offset < rinfo->dir_nr) {
                u64 pos = ceph_make_fpos(frag, off);
                struct ceph_mds_reply_inode *in =
                        rinfo->dir_in[off - fi->offset].in;
                struct ceph_vino vino;
                ino_t ino;

                dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
                     off, off - fi->offset, rinfo->dir_nr, pos,
                     rinfo->dir_dname_len[off - fi->offset],
                     rinfo->dir_dname[off - fi->offset], in);
                BUG_ON(!in);
                ftype = le32_to_cpu(in->mode) >> 12;
                vino.ino = le64_to_cpu(in->ino);
                vino.snap = le64_to_cpu(in->snapid);
                ino = ceph_vino_to_ino(vino);
                if (filldir(dirent,
                            rinfo->dir_dname[off - fi->offset],
                            rinfo->dir_dname_len[off - fi->offset],
                            pos, ino, ftype) < 0) {
                        dout("filldir stopping us...\n");
                        return 0;
                }
                off++;
                filp->f_pos = pos + 1;
        }

        if (fi->last_name) {
                ceph_mdsc_put_request(fi->last_readdir);
                fi->last_readdir = NULL;
                goto more;
        }

        /* more frags? */
        if (!ceph_frag_is_rightmost(frag)) {
                frag = ceph_frag_next(frag);
                off = 0;
                filp->f_pos = ceph_make_fpos(frag, off);
                dout("readdir next frag is %x\n", frag);
                goto more;
        }
        fi->at_end = 1;

        /*
         * if dir_release_count still matches the dir, no dentries
         * were released during the whole readdir, and we should have
         * the complete dir contents in our cache.
         */
        spin_lock(&inode->i_lock);
        if (ci->i_release_count == fi->dir_release_count) {
                dout(" marking %p complete\n", inode);
                ci->i_ceph_flags |= CEPH_I_COMPLETE;
                ci->i_max_offset = filp->f_pos;
        }
        spin_unlock(&inode->i_lock);

        dout("readdir %p filp %p done.\n", inode, filp);
        return 0;
}

static void reset_readdir(struct ceph_file_info *fi)
{
        if (fi->last_readdir) {
                ceph_mdsc_put_request(fi->last_readdir);
                fi->last_readdir = NULL;
        }
        kfree(fi->last_name);
        fi->last_name = NULL;
        fi->next_offset = 2;  /* compensate for . and .. */
        if (fi->dentry) {
                dput(fi->dentry);
                fi->dentry = NULL;
        }
        fi->at_end = 0;
}

static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int origin)
{
        struct ceph_file_info *fi = file->private_data;
        struct inode *inode = file->f_mapping->host;
        loff_t old_offset = offset;
        loff_t retval;

        mutex_lock(&inode->i_mutex);
        switch (origin) {
        case SEEK_END:
                offset += inode->i_size + 2;  /* FIXME */
                break;
        case SEEK_CUR:
                offset += file->f_pos;
        }
        retval = -EINVAL;
        if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
                if (offset != file->f_pos) {
                        file->f_pos = offset;
                        file->f_version = 0;
                        fi->at_end = 0;
                }
                retval = offset;

                /*
                 * discard buffered readdir content on seekdir(0), or
                 * seek to new frag, or seek prior to current chunk.
                 */
                if (offset == 0 ||
                    fpos_frag(offset) != fpos_frag(old_offset) ||
                    fpos_off(offset) < fi->offset) {
                        dout("dir_llseek dropping %p content\n", file);
                        reset_readdir(fi);
                }

                /* a forward seek may have skipped entries we never
                 * cached; knock dir_release_count out of sync so we
                 * won't wrongly mark the dir I_COMPLETE */
                if (offset > old_offset)
                        fi->dir_release_count--;
        }
        mutex_unlock(&inode->i_mutex);
        return retval;
}
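/*
 * Note that the f_pos values handed out above double as the cookies
 * returned by telldir(3); POSIX only guarantees seekdir(3) to a
 * cookie previously obtained from telldir(3) (or to 0).  That is why
 * a forward seek knocks dir_release_count out of sync: entries may
 * have been skipped, so this pass can no longer prove the cached dir
 * is complete.
 */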
/*
 * Process result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
                                  struct dentry *dentry, int err)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
        struct inode *parent = dentry->d_parent->d_inode;

        /* .snap dir? */
        if (err == -ENOENT &&
            strcmp(dentry->d_name.name,
                   fsc->mount_options->snapdir_name) == 0) {
                struct inode *inode = ceph_get_snapdir(parent);
                dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
                     dentry, dentry->d_name.len, dentry->d_name.name, inode);
                BUG_ON(!d_unhashed(dentry));
                d_add(dentry, inode);
                err = 0;
        }

        if (err == -ENOENT) {
                /* no trace? */
                err = 0;
                if (!req->r_reply_info.head->is_dentry) {
                        dout("ENOENT and no trace, dentry %p inode %p\n",
                             dentry, dentry->d_inode);
                        if (dentry->d_inode) {
                                d_drop(dentry);
                                err = -ENOENT;
                        } else {
                                d_add(dentry, NULL);
                        }
                }
        }
        if (err)
                dentry = ERR_PTR(err);
        else if (dentry != req->r_dentry)
                dentry = dget(req->r_dentry);  /* we got spliced */
        else
                dentry = NULL;
        return dentry;
}

static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
        return ceph_ino(inode) == CEPH_INO_ROOT &&
                strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}

/*
 * Look up a single dir entry.  If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
                                  struct nameidata *nd)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int op;
        int err;

        dout("lookup %p dentry %p '%.*s'\n",
             dir, dentry, dentry->d_name.len, dentry->d_name.name);

        if (dentry->d_name.len > NAME_MAX)
                return ERR_PTR(-ENAMETOOLONG);

        err = ceph_init_dentry(dentry);
        if (err < 0)
                return ERR_PTR(err);

        /* open (but not create!) intent? */
        if (nd &&
            (nd->flags & LOOKUP_OPEN) &&
            (nd->flags & LOOKUP_CONTINUE) == 0 && /* only open last component */
            !(nd->intent.open.flags & O_CREAT)) {
                int mode = nd->intent.open.create_mode & ~current->fs->umask;
                return ceph_lookup_open(dir, dentry, nd, mode, 1);
        }

        /* can we conclude ENOENT locally? */
        if (dentry->d_inode == NULL) {
                struct ceph_inode_info *ci = ceph_inode(dir);
                struct ceph_dentry_info *di = ceph_dentry(dentry);

                spin_lock(&dir->i_lock);
                dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
                if (strncmp(dentry->d_name.name,
                            fsc->mount_options->snapdir_name,
                            dentry->d_name.len) &&
                    !is_root_ceph_dentry(dir, dentry) &&
                    (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
                    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
                        spin_unlock(&dir->i_lock);
                        dout(" dir %p complete, -ENOENT\n", dir);
                        d_add(dentry, NULL);
                        di->lease_shared_gen = ci->i_shared_gen;
                        return NULL;
                }
                spin_unlock(&dir->i_lock);
        }
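        /*
         * Not answerable from the cache: ask the MDS.  A lookup inside
         * the .snap pseudo-directory becomes LOOKUPSNAP; everything
         * else is a plain LOOKUP.
         */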
        op = ceph_snap(dir) == CEPH_SNAPDIR ?
                CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
        req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
        if (IS_ERR(req))
                return ERR_CAST(req);
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        /* we only need inode linkage */
        req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
        req->r_locked_dir = dir;
        err = ceph_mdsc_do_request(mdsc, NULL, req);
        dentry = ceph_finish_lookup(req, dentry, err);
        ceph_mdsc_put_request(req);  /* will dput(dentry) */
        dout("lookup result=%p\n", dentry);
        return dentry;
}

/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
        struct dentry *result = ceph_lookup(dir, dentry, NULL);

        if (result && !IS_ERR(result)) {
                /*
                 * We created the item, then did a lookup, and found
                 * it was already linked to another inode we already
                 * had in our cache (and thus got spliced).  Link our
                 * dentry to that inode, but don't hash it, just in
                 * case the VFS wants to dereference it.
                 */
                BUG_ON(!result->d_inode);
                d_instantiate(dentry, result->d_inode);
                return 0;
        }
        return PTR_ERR(result);
}

static int ceph_mknod(struct inode *dir, struct dentry *dentry,
                      int mode, dev_t rdev)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err;

        if (ceph_snap(dir) != CEPH_NOSNAP)
                return -EROFS;

        dout("mknod in dir %p dentry %p mode 0%o rdev %d\n",
             dir, dentry, mode, rdev);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                d_drop(dentry);
                return PTR_ERR(req);
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_locked_dir = dir;
        req->r_args.mknod.mode = cpu_to_le32(mode);
        req->r_args.mknod.rdev = cpu_to_le32(rdev);
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (!err && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);
        ceph_mdsc_put_request(req);
        if (err)
                d_drop(dentry);
        return err;
}

static int ceph_create(struct inode *dir, struct dentry *dentry, int mode,
                       struct nameidata *nd)
{
        dout("create in dir %p dentry %p name '%.*s'\n",
             dir, dentry, dentry->d_name.len, dentry->d_name.name);

        if (ceph_snap(dir) != CEPH_NOSNAP)
                return -EROFS;
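        /*
         * With an open intent (the usual O_CREAT path) the create is
         * folded into ceph_lookup_open(); without one we fall back to
         * a plain mknod of a regular file.
         */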
        if (nd) {
                BUG_ON((nd->flags & LOOKUP_OPEN) == 0);
                dentry = ceph_lookup_open(dir, dentry, nd, mode, 0);
                /* hrm, what should i do here if we get aliased? */
                if (IS_ERR(dentry))
                        return PTR_ERR(dentry);
                return 0;
        }

        /* fall back to mknod */
        return ceph_mknod(dir, dentry, (mode & ~S_IFMT) | S_IFREG, 0);
}

static int ceph_symlink(struct inode *dir, struct dentry *dentry,
                        const char *dest)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err;

        if (ceph_snap(dir) != CEPH_NOSNAP)
                return -EROFS;

        dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                d_drop(dentry);
                return PTR_ERR(req);
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_path2 = kstrdup(dest, GFP_NOFS);
        req->r_locked_dir = dir;
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (!err && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);
        ceph_mdsc_put_request(req);
        if (err)
                d_drop(dentry);
        return err;
}

static int ceph_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err = -EROFS;
        int op;

        if (ceph_snap(dir) == CEPH_SNAPDIR) {
                /* mkdir .snap/foo is a MKSNAP */
                op = CEPH_MDS_OP_MKSNAP;
                dout("mksnap dir %p snap '%.*s' dn %p\n", dir,
                     dentry->d_name.len, dentry->d_name.name, dentry);
        } else if (ceph_snap(dir) == CEPH_NOSNAP) {
                dout("mkdir dir %p dn %p mode 0%o\n", dir, dentry, mode);
                op = CEPH_MDS_OP_MKDIR;
        } else {
                goto out;
        }
        req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }

        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_locked_dir = dir;
        req->r_args.mkdir.mode = cpu_to_le32(mode);
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (!err && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);
        ceph_mdsc_put_request(req);
out:
        if (err < 0)
                d_drop(dentry);
        return err;
}

static int ceph_link(struct dentry *old_dentry, struct inode *dir,
                     struct dentry *dentry)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err;

        if (ceph_snap(dir) != CEPH_NOSNAP)
                return -EROFS;

        dout("link in dir %p old_dentry %p dentry %p\n", dir,
             old_dentry, dentry);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                d_drop(dentry);
                return PTR_ERR(req);
        }
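        /*
         * A sketch of the semantics, not a spec: as elsewhere in this
         * file, r_dentry_drop/r_dentry_unless ask the request code to
         * proactively release our FILE_SHARED cap on the directory
         * (unless we also hold FILE_EXCL), since the MDS would
         * otherwise need to revoke it before committing the namespace
         * change.
         */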
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_old_dentry = dget(old_dentry); /* or inode? hrm. */
        req->r_locked_dir = dir;
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (err)
                d_drop(dentry);
        else if (!req->r_reply_info.head->is_dentry)
                d_instantiate(dentry, igrab(old_dentry->d_inode));
        ceph_mdsc_put_request(req);
        return err;
}

/*
 * For a soon-to-be unlinked file, drop the LINK_SHARED and LINK_EXCL
 * caps.  If it looks like the link count will hit 0, drop any other
 * caps (other than PIN) we don't specifically want (due to the file
 * still being open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
        struct ceph_inode_info *ci = ceph_inode(inode);
        int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

        spin_lock(&inode->i_lock);
        if (inode->i_nlink == 1) {
                drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
                ci->i_ceph_flags |= CEPH_I_NODELAY;
        }
        spin_unlock(&inode->i_lock);
        return drop;
}

/*
 * rmdir and unlink differ only in the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct inode *inode = dentry->d_inode;
        struct ceph_mds_request *req;
        int err = -EROFS;
        int op;

        if (ceph_snap(dir) == CEPH_SNAPDIR) {
                /* rmdir .snap/foo is RMSNAP */
                dout("rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len,
                     dentry->d_name.name, dentry);
                op = CEPH_MDS_OP_RMSNAP;
        } else if (ceph_snap(dir) == CEPH_NOSNAP) {
                dout("unlink/rmdir dir %p dn %p inode %p\n",
                     dir, dentry, inode);
                op = ((dentry->d_inode->i_mode & S_IFMT) == S_IFDIR) ?
                        CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
        } else
                goto out;
        req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
        if (IS_ERR(req)) {
                err = PTR_ERR(req);
                goto out;
        }
        req->r_dentry = dget(dentry);
        req->r_num_caps = 2;
        req->r_locked_dir = dir;
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        req->r_inode_drop = drop_caps_for_unlink(inode);
        err = ceph_mdsc_do_request(mdsc, dir, req);
        if (!err && !req->r_reply_info.head->is_dentry)
                d_delete(dentry);
        ceph_mdsc_put_request(req);
out:
        return err;
}

static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
                       struct inode *new_dir, struct dentry *new_dentry)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req;
        int err;

        if (ceph_snap(old_dir) != ceph_snap(new_dir))
                return -EXDEV;
        if (ceph_snap(old_dir) != CEPH_NOSNAP ||
            ceph_snap(new_dir) != CEPH_NOSNAP)
                return -EROFS;
        dout("rename dir %p dentry %p to dir %p dentry %p\n",
             old_dir, old_dentry, new_dir, new_dentry);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
        if (IS_ERR(req))
                return PTR_ERR(req);
        req->r_dentry = dget(new_dentry);
        req->r_num_caps = 2;
        req->r_old_dentry = dget(old_dentry);
        req->r_locked_dir = new_dir;
        req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
        req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
        req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
        /* release LINK_RDCACHE on source inode (mds will lock it) */
        req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
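        /* renaming over an existing target implicitly unlinks it, so
         * drop the target inode's caps just as ceph_unlink() would */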
        if (new_dentry->d_inode)
                req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
        err = ceph_mdsc_do_request(mdsc, old_dir, req);
        if (!err && !req->r_reply_info.head->is_dentry) {
                /*
                 * Normally d_move() is done by fill_trace (called by
                 * do_request, above).  If there is no trace, we need
                 * to do it here.
                 */

                /* d_move screws up d_subdirs order */
                ceph_i_clear(new_dir, CEPH_I_COMPLETE);

                d_move(old_dentry, new_dentry);

                /* ensure target dentry is invalidated, despite
                   rehashing bug in vfs_rename_dir */
                ceph_invalidate_dentry_lease(new_dentry);
        }
        ceph_mdsc_put_request(req);
        return err;
}

/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
        spin_lock(&dentry->d_lock);
        dentry->d_time = jiffies;
        ceph_dentry(dentry)->lease_shared_gen = 0;
        spin_unlock(&dentry->d_lock);
}

/*
 * Check if dentry lease is valid.  If not, delete the lease.  Try to
 * renew if the lease is more than halfway up.
 */
static int dentry_lease_is_valid(struct dentry *dentry)
{
        struct ceph_dentry_info *di;
        struct ceph_mds_session *s;
        int valid = 0;
        u32 gen;
        unsigned long ttl;
        struct ceph_mds_session *session = NULL;
        struct inode *dir = NULL;
        u32 seq = 0;

        spin_lock(&dentry->d_lock);
        di = ceph_dentry(dentry);
        if (di && di->lease_session) {
                s = di->lease_session;
                spin_lock(&s->s_cap_lock);
                gen = s->s_cap_gen;
                ttl = s->s_cap_ttl;
                spin_unlock(&s->s_cap_lock);

                if (di->lease_gen == gen &&
                    time_before(jiffies, dentry->d_time) &&
                    time_before(jiffies, ttl)) {
                        valid = 1;
                        if (di->lease_renew_after &&
                            time_after(jiffies, di->lease_renew_after)) {
                                /* we should renew */
                                dir = dentry->d_parent->d_inode;
                                session = ceph_get_mds_session(s);
                                seq = di->lease_seq;
                                di->lease_renew_after = 0;
                                di->lease_renew_from = jiffies;
                        }
                }
        }
        spin_unlock(&dentry->d_lock);

        if (session) {
                ceph_mdsc_lease_send_msg(session, dir, dentry,
                                         CEPH_MDS_LEASE_RENEW, seq);
                ceph_put_mds_session(session);
        }
        dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
        return valid;
}

/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
        struct ceph_inode_info *ci = ceph_inode(dir);
        struct ceph_dentry_info *di = ceph_dentry(dentry);
        int valid = 0;

        spin_lock(&dir->i_lock);
        if (ci->i_shared_gen == di->lease_shared_gen)
                valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
        spin_unlock(&dir->i_lock);
        dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
             dir, (unsigned)ci->i_shared_gen, dentry,
             (unsigned)di->lease_shared_gen, valid);
        return valid;
}
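/*
 * Taken together, a cached dentry is trusted on either of two
 * grounds: a per-dentry lease from the MDS (dentry_lease_is_valid(),
 * above), or a directory-wide FILE_SHARED cap whose generation has
 * not changed since the dentry was instantiated (dir_lease_is_valid()).
 */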
986 */ 987 static int ceph_d_revalidate(struct dentry *dentry, struct nameidata *nd) 988 { 989 struct inode *dir = dentry->d_parent->d_inode; 990 991 dout("d_revalidate %p '%.*s' inode %p offset %lld\n", dentry, 992 dentry->d_name.len, dentry->d_name.name, dentry->d_inode, 993 ceph_dentry(dentry)->offset); 994 995 /* always trust cached snapped dentries, snapdir dentry */ 996 if (ceph_snap(dir) != CEPH_NOSNAP) { 997 dout("d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry, 998 dentry->d_name.len, dentry->d_name.name, dentry->d_inode); 999 goto out_touch; 1000 } 1001 if (dentry->d_inode && ceph_snap(dentry->d_inode) == CEPH_SNAPDIR) 1002 goto out_touch; 1003 1004 if (dentry_lease_is_valid(dentry) || 1005 dir_lease_is_valid(dir, dentry)) 1006 goto out_touch; 1007 1008 dout("d_revalidate %p invalid\n", dentry); 1009 d_drop(dentry); 1010 return 0; 1011 out_touch: 1012 ceph_dentry_lru_touch(dentry); 1013 return 1; 1014 } 1015 1016 /* 1017 * When a dentry is released, clear the dir I_COMPLETE if it was part 1018 * of the current dir gen or if this is in the snapshot namespace. 1019 */ 1020 static void ceph_dentry_release(struct dentry *dentry) 1021 { 1022 struct ceph_dentry_info *di = ceph_dentry(dentry); 1023 struct inode *parent_inode = NULL; 1024 u64 snapid = CEPH_NOSNAP; 1025 1026 if (!IS_ROOT(dentry)) { 1027 parent_inode = dentry->d_parent->d_inode; 1028 if (parent_inode) 1029 snapid = ceph_snap(parent_inode); 1030 } 1031 dout("dentry_release %p parent %p\n", dentry, parent_inode); 1032 if (parent_inode && snapid != CEPH_SNAPDIR) { 1033 struct ceph_inode_info *ci = ceph_inode(parent_inode); 1034 1035 spin_lock(&parent_inode->i_lock); 1036 if (ci->i_shared_gen == di->lease_shared_gen || 1037 snapid <= CEPH_MAXSNAP) { 1038 dout(" clearing %p complete (d_release)\n", 1039 parent_inode); 1040 ci->i_ceph_flags &= ~CEPH_I_COMPLETE; 1041 ci->i_release_count++; 1042 } 1043 spin_unlock(&parent_inode->i_lock); 1044 } 1045 if (di) { 1046 ceph_dentry_lru_del(dentry); 1047 if (di->lease_session) 1048 ceph_put_mds_session(di->lease_session); 1049 kmem_cache_free(ceph_dentry_cachep, di); 1050 dentry->d_fsdata = NULL; 1051 } 1052 } 1053 1054 static int ceph_snapdir_d_revalidate(struct dentry *dentry, 1055 struct nameidata *nd) 1056 { 1057 /* 1058 * Eventually, we'll want to revalidate snapped metadata 1059 * too... probably... 1060 */ 1061 return 1; 1062 } 1063 1064 1065 1066 /* 1067 * read() on a dir. This weird interface hack only works if mounted 1068 * with '-o dirstat'. 
1069 */ 1070 static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size, 1071 loff_t *ppos) 1072 { 1073 struct ceph_file_info *cf = file->private_data; 1074 struct inode *inode = file->f_dentry->d_inode; 1075 struct ceph_inode_info *ci = ceph_inode(inode); 1076 int left; 1077 1078 if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT)) 1079 return -EISDIR; 1080 1081 if (!cf->dir_info) { 1082 cf->dir_info = kmalloc(1024, GFP_NOFS); 1083 if (!cf->dir_info) 1084 return -ENOMEM; 1085 cf->dir_info_len = 1086 sprintf(cf->dir_info, 1087 "entries: %20lld\n" 1088 " files: %20lld\n" 1089 " subdirs: %20lld\n" 1090 "rentries: %20lld\n" 1091 " rfiles: %20lld\n" 1092 " rsubdirs: %20lld\n" 1093 "rbytes: %20lld\n" 1094 "rctime: %10ld.%09ld\n", 1095 ci->i_files + ci->i_subdirs, 1096 ci->i_files, 1097 ci->i_subdirs, 1098 ci->i_rfiles + ci->i_rsubdirs, 1099 ci->i_rfiles, 1100 ci->i_rsubdirs, 1101 ci->i_rbytes, 1102 (long)ci->i_rctime.tv_sec, 1103 (long)ci->i_rctime.tv_nsec); 1104 } 1105 1106 if (*ppos >= cf->dir_info_len) 1107 return 0; 1108 size = min_t(unsigned, size, cf->dir_info_len-*ppos); 1109 left = copy_to_user(buf, cf->dir_info + *ppos, size); 1110 if (left == size) 1111 return -EFAULT; 1112 *ppos += (size - left); 1113 return size - left; 1114 } 1115 1116 /* 1117 * an fsync() on a dir will wait for any uncommitted directory 1118 * operations to commit. 1119 */ 1120 static int ceph_dir_fsync(struct file *file, int datasync) 1121 { 1122 struct inode *inode = file->f_path.dentry->d_inode; 1123 struct ceph_inode_info *ci = ceph_inode(inode); 1124 struct list_head *head = &ci->i_unsafe_dirops; 1125 struct ceph_mds_request *req; 1126 u64 last_tid; 1127 int ret = 0; 1128 1129 dout("dir_fsync %p\n", inode); 1130 spin_lock(&ci->i_unsafe_lock); 1131 if (list_empty(head)) 1132 goto out; 1133 1134 req = list_entry(head->prev, 1135 struct ceph_mds_request, r_unsafe_dir_item); 1136 last_tid = req->r_tid; 1137 1138 do { 1139 ceph_mdsc_get_request(req); 1140 spin_unlock(&ci->i_unsafe_lock); 1141 dout("dir_fsync %p wait on tid %llu (until %llu)\n", 1142 inode, req->r_tid, last_tid); 1143 if (req->r_timeout) { 1144 ret = wait_for_completion_timeout( 1145 &req->r_safe_completion, req->r_timeout); 1146 if (ret > 0) 1147 ret = 0; 1148 else if (ret == 0) 1149 ret = -EIO; /* timed out */ 1150 } else { 1151 wait_for_completion(&req->r_safe_completion); 1152 } 1153 spin_lock(&ci->i_unsafe_lock); 1154 ceph_mdsc_put_request(req); 1155 1156 if (ret || list_empty(head)) 1157 break; 1158 req = list_entry(head->next, 1159 struct ceph_mds_request, r_unsafe_dir_item); 1160 } while (req->r_tid < last_tid); 1161 out: 1162 spin_unlock(&ci->i_unsafe_lock); 1163 return ret; 1164 } 1165 1166 /* 1167 * We maintain a private dentry LRU. 1168 * 1169 * FIXME: this needs to be changed to a per-mds lru to be useful. 
1170 */ 1171 void ceph_dentry_lru_add(struct dentry *dn) 1172 { 1173 struct ceph_dentry_info *di = ceph_dentry(dn); 1174 struct ceph_mds_client *mdsc; 1175 1176 dout("dentry_lru_add %p %p '%.*s'\n", di, dn, 1177 dn->d_name.len, dn->d_name.name); 1178 if (di) { 1179 mdsc = ceph_sb_to_client(dn->d_sb)->mdsc; 1180 spin_lock(&mdsc->dentry_lru_lock); 1181 list_add_tail(&di->lru, &mdsc->dentry_lru); 1182 mdsc->num_dentry++; 1183 spin_unlock(&mdsc->dentry_lru_lock); 1184 } 1185 } 1186 1187 void ceph_dentry_lru_touch(struct dentry *dn) 1188 { 1189 struct ceph_dentry_info *di = ceph_dentry(dn); 1190 struct ceph_mds_client *mdsc; 1191 1192 dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn, 1193 dn->d_name.len, dn->d_name.name, di->offset); 1194 if (di) { 1195 mdsc = ceph_sb_to_client(dn->d_sb)->mdsc; 1196 spin_lock(&mdsc->dentry_lru_lock); 1197 list_move_tail(&di->lru, &mdsc->dentry_lru); 1198 spin_unlock(&mdsc->dentry_lru_lock); 1199 } 1200 } 1201 1202 void ceph_dentry_lru_del(struct dentry *dn) 1203 { 1204 struct ceph_dentry_info *di = ceph_dentry(dn); 1205 struct ceph_mds_client *mdsc; 1206 1207 dout("dentry_lru_del %p %p '%.*s'\n", di, dn, 1208 dn->d_name.len, dn->d_name.name); 1209 if (di) { 1210 mdsc = ceph_sb_to_client(dn->d_sb)->mdsc; 1211 spin_lock(&mdsc->dentry_lru_lock); 1212 list_del_init(&di->lru); 1213 mdsc->num_dentry--; 1214 spin_unlock(&mdsc->dentry_lru_lock); 1215 } 1216 } 1217 1218 const struct file_operations ceph_dir_fops = { 1219 .read = ceph_read_dir, 1220 .readdir = ceph_readdir, 1221 .llseek = ceph_dir_llseek, 1222 .open = ceph_open, 1223 .release = ceph_release, 1224 .unlocked_ioctl = ceph_ioctl, 1225 .fsync = ceph_dir_fsync, 1226 }; 1227 1228 const struct inode_operations ceph_dir_iops = { 1229 .lookup = ceph_lookup, 1230 .permission = ceph_permission, 1231 .getattr = ceph_getattr, 1232 .setattr = ceph_setattr, 1233 .setxattr = ceph_setxattr, 1234 .getxattr = ceph_getxattr, 1235 .listxattr = ceph_listxattr, 1236 .removexattr = ceph_removexattr, 1237 .mknod = ceph_mknod, 1238 .symlink = ceph_symlink, 1239 .mkdir = ceph_mkdir, 1240 .link = ceph_link, 1241 .unlink = ceph_unlink, 1242 .rmdir = ceph_unlink, 1243 .rename = ceph_rename, 1244 .create = ceph_create, 1245 }; 1246 1247 const struct dentry_operations ceph_dentry_ops = { 1248 .d_revalidate = ceph_d_revalidate, 1249 .d_release = ceph_dentry_release, 1250 }; 1251 1252 const struct dentry_operations ceph_snapdir_dentry_ops = { 1253 .d_revalidate = ceph_snapdir_d_revalidate, 1254 .d_release = ceph_dentry_release, 1255 }; 1256 1257 const struct dentry_operations ceph_snap_dentry_ops = { 1258 .d_release = ceph_dentry_release, 1259 }; 1260