/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dcache.c
 *
 * dentry cache handling code
 *
 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/namei.h>

#define MLOG_MASK_PREFIX ML_DCACHE
#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "dcache.h"
#include "dlmglue.h"
#include "file.h"
#include "inode.h"
#include "super.h"


static int ocfs2_dentry_revalidate(struct dentry *dentry,
				   struct nameidata *nd)
{
	struct inode *inode = dentry->d_inode;
	int ret = 0;    /* if all else fails, just return false */
	struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb);

	mlog_entry("(0x%p, '%.*s')\n", dentry,
		   dentry->d_name.len, dentry->d_name.name);

	/* Never trust a negative dentry - force a new lookup. */
	if (inode == NULL) {
		mlog(0, "negative dentry: %.*s\n", dentry->d_name.len,
		     dentry->d_name.name);
		goto bail;
	}

	BUG_ON(!osb);

	if (inode == osb->root_inode || is_bad_inode(inode))
		goto bail;

	spin_lock(&OCFS2_I(inode)->ip_lock);
	/* did we or someone else delete this inode? */
	if (OCFS2_I(inode)->ip_flags & OCFS2_INODE_DELETED) {
		spin_unlock(&OCFS2_I(inode)->ip_lock);
		mlog(0, "inode (%llu) deleted, returning false\n",
		     (unsigned long long)OCFS2_I(inode)->ip_blkno);
		goto bail;
	}
	spin_unlock(&OCFS2_I(inode)->ip_lock);

	/*
	 * We don't need a cluster lock to test this because once an
	 * inode nlink hits zero, it never goes back.
	 */
	if (inode->i_nlink == 0) {
		mlog(0, "Inode %llu orphaned, returning false "
		     "dir = %d\n",
		     (unsigned long long)OCFS2_I(inode)->ip_blkno,
		     S_ISDIR(inode->i_mode));
		goto bail;
	}

	ret = 1;

bail:
	mlog_exit(ret);

	return ret;
}

static int ocfs2_match_dentry(struct dentry *dentry,
			      u64 parent_blkno,
			      int skip_unhashed)
{
	struct inode *parent;

	/*
	 * ocfs2_lookup() does a d_splice_alias() _before_ attaching
	 * to the lock data, so we skip those here, otherwise
	 * ocfs2_dentry_attach_lock() will get its original dentry
	 * back.
	 */
	if (!dentry->d_fsdata)
		return 0;

	if (!dentry->d_parent)
		return 0;

	if (skip_unhashed && d_unhashed(dentry))
		return 0;

	parent = dentry->d_parent->d_inode;
	/* Negative parent dentry? */
	if (!parent)
		return 0;

	/* Name is in a different directory. */
	if (OCFS2_I(parent)->ip_blkno != parent_blkno)
		return 0;

	return 1;
}

/*
 * Walk the inode alias list, and find a dentry which has a given
 * parent. ocfs2_dentry_attach_lock() wants to find _any_ alias as it
 * is looking for a dentry_lock reference. The downconvert thread is
 * looking to unhash aliases, so we allow it to skip any that already
 * have that property.
 */
struct dentry *ocfs2_find_local_alias(struct inode *inode,
				      u64 parent_blkno,
				      int skip_unhashed)
{
	struct list_head *p;
	struct dentry *dentry = NULL;

	spin_lock(&dcache_lock);

	list_for_each(p, &inode->i_dentry) {
		dentry = list_entry(p, struct dentry, d_alias);

		if (ocfs2_match_dentry(dentry, parent_blkno, skip_unhashed)) {
			mlog(0, "dentry found: %.*s\n",
			     dentry->d_name.len, dentry->d_name.name);

			dget_locked(dentry);
			break;
		}

		dentry = NULL;
	}

	spin_unlock(&dcache_lock);

	return dentry;
}
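
/*
 * Editor's sketch (hypothetical helper, not part of the original
 * file): the downconvert side mentioned in the comment above consumes
 * ocfs2_find_local_alias() in a loop, passing skip_unhashed = 1 so
 * aliases it has already unhashed are not handed back again. The real
 * consumer is ocfs2_dentry_convert_worker() in dlmglue.c; this only
 * illustrates the calling contract.
 */
static void __maybe_unused
ocfs2_sketch_unhash_aliases(struct inode *inode, u64 parent_blkno)
{
	struct dentry *dentry;

	while ((dentry = ocfs2_find_local_alias(inode, parent_blkno, 1))) {
		/* The alias comes back with a reference held for us. */
		d_delete(dentry);
		dput(dentry);
	}
}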

DEFINE_SPINLOCK(dentry_attach_lock);

/*
 * Attach this dentry to a cluster lock.
 *
 * Dentry locks cover all links in a given directory to a particular
 * inode. We do this so that ocfs2 can build a lock name which all
 * nodes in the cluster can agree on at all times. Shoving full names
 * in the cluster lock won't work due to size restrictions. Covering
 * links inside of a directory is a good compromise because it still
 * allows us to use the parent directory lock to synchronize
 * operations.
 *
 * Call this function with the parent dir semaphore and the parent dir
 * cluster lock held.
 *
 * The dir semaphore will protect us from having to worry about
 * concurrent processes on our node trying to attach a lock at the
 * same time.
 *
 * The dir cluster lock (held at either PR or EX mode) protects us
 * from unlink and rename on other nodes.
 *
 * A dput() can happen asynchronously due to pruning, so we cover
 * attaching and detaching the dentry lock with a
 * dentry_attach_lock.
 *
 * A node which has done lookup on a name retains a protected read
 * lock until final dput. If the user requests an unlink or rename,
 * the protected read is upgraded to an exclusive lock. Other nodes
 * who have seen the dentry will then be informed that they need to
 * downgrade their lock, which will involve d_delete on the
 * dentry. This happens in ocfs2_dentry_convert_worker().
 */
int ocfs2_dentry_attach_lock(struct dentry *dentry,
			     struct inode *inode,
			     u64 parent_blkno)
{
	int ret;
	struct dentry *alias;
	struct ocfs2_dentry_lock *dl = dentry->d_fsdata;

	mlog(0, "Attach \"%.*s\", parent %llu, fsdata: %p\n",
	     dentry->d_name.len, dentry->d_name.name,
	     (unsigned long long)parent_blkno, dl);

	/*
	 * Negative dentry. We ignore these for now.
	 *
	 * XXX: Could we improve ocfs2_dentry_revalidate() by
	 * tracking these?
	 */
	if (!inode)
		return 0;

	if (dl) {
		mlog_bug_on_msg(dl->dl_parent_blkno != parent_blkno,
				" \"%.*s\": old parent: %llu, new: %llu\n",
				dentry->d_name.len, dentry->d_name.name,
				(unsigned long long)parent_blkno,
				(unsigned long long)dl->dl_parent_blkno);
		return 0;
	}

	alias = ocfs2_find_local_alias(inode, parent_blkno, 0);
	if (alias) {
		/*
		 * Great, an alias exists, which means we must have a
		 * dentry lock already. We can just grab the lock off
		 * the alias and add it to the list.
		 *
		 * We're depending here on the fact that this dentry
		 * was found and exists in the dcache and so must have
		 * a reference to the dentry_lock because we can't
		 * race creates. Final dput() cannot happen on it
		 * since we have it pinned, so our reference is safe.
		 */
		dl = alias->d_fsdata;
		mlog_bug_on_msg(!dl, "parent %llu, ino %llu\n",
				(unsigned long long)parent_blkno,
				(unsigned long long)OCFS2_I(inode)->ip_blkno);

		mlog_bug_on_msg(dl->dl_parent_blkno != parent_blkno,
				" \"%.*s\": old parent: %llu, new: %llu\n",
				dentry->d_name.len, dentry->d_name.name,
				(unsigned long long)parent_blkno,
				(unsigned long long)dl->dl_parent_blkno);

		mlog(0, "Found: %s\n", dl->dl_lockres.l_name);

		goto out_attach;
	}

	/*
	 * There are no other aliases
	 */
	dl = kmalloc(sizeof(*dl), GFP_NOFS);
	if (!dl) {
		ret = -ENOMEM;
		mlog_errno(ret);
		return ret;
	}

	dl->dl_count = 0;
	/*
	 * Does this have to happen below, for all attaches, in case
	 * the struct inode gets blown away by the downconvert thread?
	 */
	dl->dl_inode = igrab(inode);
	dl->dl_parent_blkno = parent_blkno;
	ocfs2_dentry_lock_res_init(dl, parent_blkno, inode);

out_attach:
	spin_lock(&dentry_attach_lock);
	dentry->d_fsdata = dl;
	dl->dl_count++;
	spin_unlock(&dentry_attach_lock);

	/*
	 * This actually gets us our PRMODE level lock. From now on,
	 * we'll have a notification if one of these names is
	 * destroyed on another node.
	 */
	ret = ocfs2_dentry_lock(dentry, 0);
	if (!ret)
		ocfs2_dentry_unlock(dentry, 0);
	else
		mlog_errno(ret);

	dput(alias);

	return ret;
}
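
/*
 * Editor's sketch (hypothetical helper, not original code): the shape
 * of an attach call site in the lookup path. The real caller is
 * ocfs2_lookup() in namei.c, which already holds the parent's i_mutex
 * and cluster lock as the comment above requires; the code there is
 * more involved.
 */
static int __maybe_unused
ocfs2_sketch_attach_from_lookup(struct inode *dir, struct dentry *dentry)
{
	int status;

	/* Caller must hold the dir i_mutex and the dir cluster lock. */
	status = ocfs2_dentry_attach_lock(dentry, dentry->d_inode,
					  OCFS2_I(dir)->ip_blkno);
	if (status)
		mlog_errno(status);

	return status;
}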

static DEFINE_SPINLOCK(dentry_list_lock);

/* We limit the number of dentry locks to drop in one go. We have
 * this limit so that we don't starve other users of ocfs2_wq. */
#define DL_INODE_DROP_COUNT 64

/* Drop inode references from dentry locks */
void ocfs2_drop_dl_inodes(struct work_struct *work)
{
	struct ocfs2_super *osb = container_of(work, struct ocfs2_super,
					       dentry_lock_work);
	struct ocfs2_dentry_lock *dl;
	int drop_count = DL_INODE_DROP_COUNT;

	spin_lock(&dentry_list_lock);
	while (osb->dentry_lock_list && drop_count--) {
		dl = osb->dentry_lock_list;
		osb->dentry_lock_list = dl->dl_next;
		spin_unlock(&dentry_list_lock);
		iput(dl->dl_inode);
		kfree(dl);
		spin_lock(&dentry_list_lock);
	}
	if (osb->dentry_lock_list)
		queue_work(ocfs2_wq, &osb->dentry_lock_work);
	spin_unlock(&dentry_list_lock);
}
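
/*
 * Editor's sketch (hypothetical, not original code): the work item
 * drained above has to be wired to ocfs2_drop_dl_inodes() once at
 * mount time, roughly as below. In ocfs2 this initialization lives in
 * super.c alongside the rest of the osb setup.
 */
static void __maybe_unused ocfs2_sketch_init_drop_work(struct ocfs2_super *osb)
{
	osb->dentry_lock_list = NULL;
	INIT_WORK(&osb->dentry_lock_work, ocfs2_drop_dl_inodes);
}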

/*
 * ocfs2_dentry_iput() and friends.
 *
 * At this point, our particular dentry is detached from the inode's
 * alias list, so there's no way that the locking code can find it.
 *
 * The interesting stuff happens when we determine that our lock needs
 * to go away because this is the last subdir alias in the
 * system. This function needs to handle a couple things:
 *
 * 1) Synchronizing lock shutdown with the downconvert threads. This
 *    is already handled for us via the lockres release drop function
 *    called in ocfs2_release_dentry_lock()
 *
 * 2) A race may occur when we're doing our lock shutdown and
 *    another process wants to create a new dentry lock. Right now we
 *    let them race, which means that for a very short while, this
 *    node might have two locks on a lock resource. This shouldn't be
 *    a problem, though, because one of them is in the process of
 *    being thrown out.
 */
static void ocfs2_drop_dentry_lock(struct ocfs2_super *osb,
				   struct ocfs2_dentry_lock *dl)
{
	ocfs2_simple_drop_lockres(osb, &dl->dl_lockres);
	ocfs2_lock_res_free(&dl->dl_lockres);

	/* We leave dropping of inode reference to ocfs2_wq as that can
	 * possibly lead to inode deletion which gets tricky */
	spin_lock(&dentry_list_lock);
	if (!osb->dentry_lock_list)
		queue_work(ocfs2_wq, &osb->dentry_lock_work);
	dl->dl_next = osb->dentry_lock_list;
	osb->dentry_lock_list = dl;
	spin_unlock(&dentry_list_lock);
}

void ocfs2_dentry_lock_put(struct ocfs2_super *osb,
			   struct ocfs2_dentry_lock *dl)
{
	int unlock;

	BUG_ON(dl->dl_count == 0);

	spin_lock(&dentry_attach_lock);
	dl->dl_count--;
	unlock = !dl->dl_count;
	spin_unlock(&dentry_attach_lock);

	if (unlock)
		ocfs2_drop_dentry_lock(osb, dl);
}

static void ocfs2_dentry_iput(struct dentry *dentry, struct inode *inode)
{
	struct ocfs2_dentry_lock *dl = dentry->d_fsdata;

	if (!dl) {
		/*
		 * No dentry lock is ok if we're disconnected or
		 * unhashed.
		 */
		if (!(dentry->d_flags & DCACHE_DISCONNECTED) &&
		    !d_unhashed(dentry)) {
			unsigned long long ino = 0ULL;
			if (inode)
				ino = (unsigned long long)OCFS2_I(inode)->ip_blkno;
			mlog(ML_ERROR, "Dentry is missing cluster lock. "
			     "inode: %llu, d_flags: 0x%x, d_name: %.*s\n",
			     ino, dentry->d_flags, dentry->d_name.len,
			     dentry->d_name.name);
		}

		goto out;
	}

	mlog_bug_on_msg(dl->dl_count == 0, "dentry: %.*s, count: %u\n",
			dentry->d_name.len, dentry->d_name.name,
			dl->dl_count);

	ocfs2_dentry_lock_put(OCFS2_SB(dentry->d_sb), dl);

out:
	iput(inode);
}

/*
 * d_move(), but keep the locks in sync.
 *
 * When we are done, "dentry" will have the parent dir and name of
 * "target", which will be thrown away.
 *
 * We manually update the lock of "dentry" if need be.
 *
 * "target" doesn't have its dentry lock touched - we allow the later
 * dput() to handle this for us.
 *
 * This is called during ocfs2_rename(), while holding parent
 * directory locks. The dentries have already been deleted on other
 * nodes via ocfs2_remote_dentry_delete().
 *
 * Normally, the VFS handles the d_move() for the file system, after
 * the ->rename() callback. OCFS2 wants to handle this internally, so
 * the new lock can be created atomically with respect to the cluster.
 */
void ocfs2_dentry_move(struct dentry *dentry, struct dentry *target,
		       struct inode *old_dir, struct inode *new_dir)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(old_dir->i_sb);
	struct inode *inode = dentry->d_inode;

	/*
	 * Move within the same directory, so the actual lock info won't
	 * change.
	 *
	 * XXX: Is there any advantage to dropping the lock here?
	 */
	if (old_dir == new_dir)
		goto out_move;

	ocfs2_dentry_lock_put(osb, dentry->d_fsdata);

	dentry->d_fsdata = NULL;
	ret = ocfs2_dentry_attach_lock(dentry, inode, OCFS2_I(new_dir)->ip_blkno);
	if (ret)
		mlog_errno(ret);

out_move:
	d_move(dentry, target);
}

const struct dentry_operations ocfs2_dentry_ops = {
	.d_revalidate	= ocfs2_dentry_revalidate,
	.d_iput		= ocfs2_dentry_iput,
};
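
/*
 * Editor's sketch (hypothetical, not original code): in kernels of
 * this vintage the operations table above is attached to each dentry
 * by hand during lookup; for ocfs2 that assignment lives in namei.c.
 * A minimal version:
 */
static void __maybe_unused ocfs2_sketch_set_dentry_ops(struct dentry *dentry)
{
	dentry->d_op = &ocfs2_dentry_ops;
}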