1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* CacheFiles path walking and related routines
3  *
4  * Copyright (C) 2021 Red Hat, Inc. All Rights Reserved.
5  * Written by David Howells (dhowells@redhat.com)
6  */
7 
8 #include <linux/fs.h>
9 #include <linux/namei.h>
10 #include "internal.h"
11 
12 /*
13  * Mark the backing file as being a cache file if it's not already in use.  The
14  * mark tells the culling request command that it's not allowed to cull the
15  * file or directory.  The caller must hold the inode lock.
16  */
17 static bool __cachefiles_mark_inode_in_use(struct cachefiles_object *object,
18 					   struct dentry *dentry)
19 {
20 	struct inode *inode = d_backing_inode(dentry);
21 	bool can_use = false;
22 
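	/* S_KERNEL_FILE doubles as the in-use mark that the cull and in-use
	 * checks look for before letting cachefilesd remove a file. */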
23 	if (!(inode->i_flags & S_KERNEL_FILE)) {
24 		inode->i_flags |= S_KERNEL_FILE;
25 		trace_cachefiles_mark_active(object, inode);
26 		can_use = true;
27 	} else {
28 		trace_cachefiles_mark_failed(object, inode);
29 		pr_notice("cachefiles: Inode already in use: %pd (B=%lx)\n",
30 			  dentry, inode->i_ino);
31 	}
32 
33 	return can_use;
34 }
35 
36 static bool cachefiles_mark_inode_in_use(struct cachefiles_object *object,
37 					 struct dentry *dentry)
38 {
39 	struct inode *inode = d_backing_inode(dentry);
40 	bool can_use;
41 
42 	inode_lock(inode);
43 	can_use = __cachefiles_mark_inode_in_use(object, dentry);
44 	inode_unlock(inode);
45 	return can_use;
46 }
47 
48 /*
49  * Unmark a backing inode.  The caller must hold the inode lock.
50  */
51 static void __cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
52 					     struct dentry *dentry)
53 {
54 	struct inode *inode = d_backing_inode(dentry);
55 
56 	inode->i_flags &= ~S_KERNEL_FILE;
57 	trace_cachefiles_mark_inactive(object, inode);
58 }
59 
60 static void cachefiles_do_unmark_inode_in_use(struct cachefiles_object *object,
61 					      struct dentry *dentry)
62 {
63 	struct inode *inode = d_backing_inode(dentry);
64 
65 	inode_lock(inode);
66 	__cachefiles_unmark_inode_in_use(object, dentry);
67 	inode_unlock(inode);
68 }
69 
70 /*
71  * Unmark a backing inode and tell cachefilesd that there's something that can
72  * be culled.
73  */
74 void cachefiles_unmark_inode_in_use(struct cachefiles_object *object,
75 				    struct file *file)
76 {
77 	struct cachefiles_cache *cache = object->volume->cache;
78 	struct inode *inode = file_inode(file);
79 
80 	if (inode) {
81 		cachefiles_do_unmark_inode_in_use(object, file->f_path.dentry);
82 
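		/* Account the released blocks and file so the daemon can see
		 * space coming back; tmpfiles were never committed to the
		 * cache and so aren't counted. */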
83 		if (!test_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags)) {
84 			atomic_long_add(inode->i_blocks, &cache->b_released);
85 			if (atomic_inc_return(&cache->f_released))
86 				cachefiles_state_changed(cache);
87 		}
88 	}
89 }
90 
91 /*
92  * Get a subdirectory, creating it if it doesn't exist yet.
93  */
94 struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
95 					struct dentry *dir,
96 					const char *dirname,
97 					bool *_is_new)
98 {
99 	struct dentry *subdir;
100 	struct path path;
101 	int ret;
102 
103 	_enter(",,%s", dirname);
104 
105 	/* search the current directory for the element name */
106 	inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
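	/* I_MUTEX_PARENT marks this as the parent-directory lock class for
	 * lockdep, so the child's inode lock can be taken underneath it. */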
107 
108 retry:
109 	ret = cachefiles_inject_read_error();
110 	if (ret == 0)
111 		subdir = lookup_one_len(dirname, dir, strlen(dirname));
112 	else
113 		subdir = ERR_PTR(ret);
114 	trace_cachefiles_lookup(NULL, dir, subdir);
115 	if (IS_ERR(subdir)) {
116 		trace_cachefiles_vfs_error(NULL, d_backing_inode(dir),
117 					   PTR_ERR(subdir),
118 					   cachefiles_trace_lookup_error);
119 		if (PTR_ERR(subdir) == -ENOMEM)
120 			goto nomem_d_alloc;
121 		goto lookup_error;
122 	}
123 
124 	_debug("subdir -> %pd %s",
125 	       subdir, d_backing_inode(subdir) ? "positive" : "negative");
126 
127 	/* we need to create the subdir if it doesn't exist yet */
128 	if (d_is_negative(subdir)) {
129 		ret = cachefiles_has_space(cache, 1, 0,
130 					   cachefiles_has_space_for_create);
131 		if (ret < 0)
132 			goto mkdir_error;
133 
134 		_debug("attempt mkdir");
135 
136 		path.mnt = cache->mnt;
137 		path.dentry = dir;
138 		ret = security_path_mkdir(&path, subdir, 0700);
139 		if (ret < 0)
140 			goto mkdir_error;
141 		ret = cachefiles_inject_write_error();
142 		if (ret == 0)
143 			ret = vfs_mkdir(&init_user_ns, d_inode(dir), subdir, 0700);
144 		if (ret < 0) {
145 			trace_cachefiles_vfs_error(NULL, d_inode(dir), ret,
146 						   cachefiles_trace_mkdir_error);
147 			goto mkdir_error;
148 		}
149 		trace_cachefiles_mkdir(dir, subdir);
150 
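		/* mkdir may have left the dentry unhashed (some filesystems
		 * instantiate a different dentry), in which case redo the
		 * lookup to get a usable one. */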
151 		if (unlikely(d_unhashed(subdir))) {
152 			cachefiles_put_directory(subdir);
153 			goto retry;
154 		}
155 		ASSERT(d_backing_inode(subdir));
156 
157 		_debug("mkdir -> %pd{ino=%lu}",
158 		       subdir, d_backing_inode(subdir)->i_ino);
159 		if (_is_new)
160 			*_is_new = true;
161 	}
162 
163 	/* Tell rmdir() it's not allowed to delete the subdir */
164 	inode_lock(d_inode(subdir));
165 	inode_unlock(d_inode(dir));
166 
167 	if (!__cachefiles_mark_inode_in_use(NULL, subdir))
168 		goto mark_error;
169 
170 	inode_unlock(d_inode(subdir));
171 
172 	/* we need to make sure the subdir is a directory */
173 	ASSERT(d_backing_inode(subdir));
174 
175 	if (!d_can_lookup(subdir)) {
176 		pr_err("%s is not a directory\n", dirname);
177 		ret = -EIO;
178 		goto check_error;
179 	}
180 
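	/* The backing filesystem must support xattrs and the full set of
	 * directory operations for the cache to be usable. */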
181 	ret = -EPERM;
182 	if (!(d_backing_inode(subdir)->i_opflags & IOP_XATTR) ||
183 	    !d_backing_inode(subdir)->i_op->lookup ||
184 	    !d_backing_inode(subdir)->i_op->mkdir ||
185 	    !d_backing_inode(subdir)->i_op->rename ||
186 	    !d_backing_inode(subdir)->i_op->rmdir ||
187 	    !d_backing_inode(subdir)->i_op->unlink)
188 		goto check_error;
189 
190 	_leave(" = [%lu]", d_backing_inode(subdir)->i_ino);
191 	return subdir;
192 
193 check_error:
194 	cachefiles_put_directory(subdir);
195 	_leave(" = %d [check]", ret);
196 	return ERR_PTR(ret);
197 
198 mark_error:
199 	inode_unlock(d_inode(subdir));
200 	dput(subdir);
201 	return ERR_PTR(-EBUSY);
202 
203 mkdir_error:
204 	inode_unlock(d_inode(dir));
205 	dput(subdir);
206 	pr_err("mkdir %s failed with error %d\n", dirname, ret);
207 	return ERR_PTR(ret);
208 
209 lookup_error:
210 	inode_unlock(d_inode(dir));
211 	ret = PTR_ERR(subdir);
212 	pr_err("Lookup %s failed with error %d\n", dirname, ret);
213 	return ERR_PTR(ret);
214 
215 nomem_d_alloc:
216 	inode_unlock(d_inode(dir));
217 	_leave(" = -ENOMEM");
218 	return ERR_PTR(-ENOMEM);
219 }
220 
221 /*
222  * Put a subdirectory.
223  */
224 void cachefiles_put_directory(struct dentry *dir)
225 {
226 	if (dir) {
227 		inode_lock(dir->d_inode);
228 		__cachefiles_unmark_inode_in_use(NULL, dir);
229 		inode_unlock(dir->d_inode);
230 		dput(dir);
231 	}
232 }
233 
234 /*
235  * Remove a regular file from the cache.
236  */
237 static int cachefiles_unlink(struct cachefiles_cache *cache,
238 			     struct cachefiles_object *object,
239 			     struct dentry *dir, struct dentry *dentry,
240 			     enum fscache_why_object_killed why)
241 {
242 	struct path path = {
243 		.mnt	= cache->mnt,
244 		.dentry	= dir,
245 	};
246 	int ret;
247 
248 	trace_cachefiles_unlink(object, d_inode(dentry)->i_ino, why);
249 	ret = security_path_unlink(&path, dentry);
250 	if (ret < 0) {
251 		cachefiles_io_error(cache, "Unlink security error");
252 		return ret;
253 	}
254 
255 	ret = cachefiles_inject_remove_error();
256 	if (ret == 0) {
257 		ret = vfs_unlink(&init_user_ns, d_backing_inode(dir), dentry, NULL);
258 		if (ret == -EIO)
259 			cachefiles_io_error(cache, "Unlink failed");
260 	}
261 	if (ret != 0)
262 		trace_cachefiles_vfs_error(object, d_backing_inode(dir), ret,
263 					   cachefiles_trace_unlink_error);
264 	return ret;
265 }
266 
267 /*
268  * Delete an object representation from the cache
269  * - File backed objects are unlinked
270  * - Directory backed objects are stuffed into the graveyard for userspace to
271  *   delete
272  */
273 int cachefiles_bury_object(struct cachefiles_cache *cache,
274 			   struct cachefiles_object *object,
275 			   struct dentry *dir,
276 			   struct dentry *rep,
277 			   enum fscache_why_object_killed why)
278 {
279 	struct dentry *grave, *trap;
280 	struct path path, path_to_graveyard;
281 	char nbuffer[8 + 8 + 1];
282 	int ret;
283 
284 	_enter(",'%pd','%pd'", dir, rep);
285 
286 	if (rep->d_parent != dir) {
287 		inode_unlock(d_inode(dir));
288 		_leave(" = -ESTALE");
289 		return -ESTALE;
290 	}
291 
292 	/* non-directories can just be unlinked */
293 	if (!d_is_dir(rep)) {
294 		dget(rep); /* Stop the dentry being negated if it's only pinned
295 			    * by a file struct.
296 			    */
297 		ret = cachefiles_unlink(cache, object, dir, rep, why);
298 		dput(rep);
299 
300 		inode_unlock(d_inode(dir));
301 		_leave(" = %d", ret);
302 		return ret;
303 	}
304 
305 	/* directories have to be moved to the graveyard */
306 	_debug("move stale object to graveyard");
307 	inode_unlock(d_inode(dir));
308 
309 try_again:
310 	/* first step is to make up a grave dentry in the graveyard */
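	/* The grave name is 16 hex digits: the wallclock time in seconds
	 * followed by a per-cache counter (e.g. "63f2a1b700000007"), so each
	 * retry gets a fresh name. */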
311 	sprintf(nbuffer, "%08x%08x",
312 		(uint32_t) ktime_get_real_seconds(),
313 		(uint32_t) atomic_inc_return(&cache->gravecounter));
314 
315 	/* do the multiway lock magic */
316 	trap = lock_rename(cache->graveyard, dir);
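	/* lock_rename() returns the ancestor dentry if one of the two
	 * directories contains the other; neither the victim nor the grave
	 * may turn out to be that "trap" or the rename would create a loop. */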
317 
318 	/* do some checks before getting the grave dentry */
319 	if (rep->d_parent != dir || IS_DEADDIR(d_inode(rep))) {
320 		/* the entry was probably culled when we dropped the parent dir
321 		 * lock */
322 		unlock_rename(cache->graveyard, dir);
323 		_leave(" = 0 [culled?]");
324 		return 0;
325 	}
326 
327 	if (!d_can_lookup(cache->graveyard)) {
328 		unlock_rename(cache->graveyard, dir);
329 		cachefiles_io_error(cache, "Graveyard no longer a directory");
330 		return -EIO;
331 	}
332 
333 	if (trap == rep) {
334 		unlock_rename(cache->graveyard, dir);
335 		cachefiles_io_error(cache, "May not make directory loop");
336 		return -EIO;
337 	}
338 
339 	if (d_mountpoint(rep)) {
340 		unlock_rename(cache->graveyard, dir);
341 		cachefiles_io_error(cache, "Mountpoint in cache");
342 		return -EIO;
343 	}
344 
345 	grave = lookup_one_len(nbuffer, cache->graveyard, strlen(nbuffer));
346 	if (IS_ERR(grave)) {
347 		unlock_rename(cache->graveyard, dir);
348 		trace_cachefiles_vfs_error(object, d_inode(cache->graveyard),
349 					   PTR_ERR(grave),
350 					   cachefiles_trace_lookup_error);
351 
352 		if (PTR_ERR(grave) == -ENOMEM) {
353 			_leave(" = -ENOMEM");
354 			return -ENOMEM;
355 		}
356 
357 		cachefiles_io_error(cache, "Lookup error %ld", PTR_ERR(grave));
358 		return -EIO;
359 	}
360 
361 	if (d_is_positive(grave)) {
362 		unlock_rename(cache->graveyard, dir);
363 		dput(grave);
364 		grave = NULL;
365 		cond_resched();
366 		goto try_again;
367 	}
368 
369 	if (d_mountpoint(grave)) {
370 		unlock_rename(cache->graveyard, dir);
371 		dput(grave);
372 		cachefiles_io_error(cache, "Mountpoint in graveyard");
373 		return -EIO;
374 	}
375 
376 	/* target should not be an ancestor of source */
377 	if (trap == grave) {
378 		unlock_rename(cache->graveyard, dir);
379 		dput(grave);
380 		cachefiles_io_error(cache, "May not make directory loop");
381 		return -EIO;
382 	}
383 
384 	/* attempt the rename */
385 	path.mnt = cache->mnt;
386 	path.dentry = dir;
387 	path_to_graveyard.mnt = cache->mnt;
388 	path_to_graveyard.dentry = cache->graveyard;
389 	ret = security_path_rename(&path, rep, &path_to_graveyard, grave, 0);
390 	if (ret < 0) {
391 		cachefiles_io_error(cache, "Rename security error %d", ret);
392 	} else {
393 		struct renamedata rd = {
394 			.old_mnt_userns	= &init_user_ns,
395 			.old_dir	= d_inode(dir),
396 			.old_dentry	= rep,
397 			.new_mnt_userns	= &init_user_ns,
398 			.new_dir	= d_inode(cache->graveyard),
399 			.new_dentry	= grave,
400 		};
401 		trace_cachefiles_rename(object, d_inode(rep)->i_ino, why);
402 		ret = cachefiles_inject_read_error();
403 		if (ret == 0)
404 			ret = vfs_rename(&rd);
405 		if (ret != 0)
406 			trace_cachefiles_vfs_error(object, d_inode(dir), ret,
407 						   cachefiles_trace_rename_error);
408 		if (ret != 0 && ret != -ENOMEM)
409 			cachefiles_io_error(cache,
410 					    "Rename failed with error %d", ret);
411 	}
412 
413 	__cachefiles_unmark_inode_in_use(object, rep);
414 	unlock_rename(cache->graveyard, dir);
415 	dput(grave);
416 	_leave(" = 0");
417 	return 0;
418 }
419 
420 /*
421  * Delete a cache file.
422  */
423 int cachefiles_delete_object(struct cachefiles_object *object,
424 			     enum fscache_why_object_killed why)
425 {
426 	struct cachefiles_volume *volume = object->volume;
427 	struct dentry *dentry = object->file->f_path.dentry;
428 	struct dentry *fan = volume->fanout[(u8)object->cookie->key_hash];
429 	int ret;
430 
431 	_enter(",OBJ%x{%pD}", object->debug_id, object->file);
432 
433 	/* Stop the dentry being negated if it's only pinned by a file struct. */
434 	dget(dentry);
435 
436 	inode_lock_nested(d_backing_inode(fan), I_MUTEX_PARENT);
437 	ret = cachefiles_unlink(volume->cache, object, fan, dentry, why);
438 	inode_unlock(d_backing_inode(fan));
439 	dput(dentry);
440 	return ret;
441 }
442 
443 /*
444  * Create a temporary file and leave it unattached and un-xattr'd until the
445  * time comes to discard the object from memory.
446  */
447 struct file *cachefiles_create_tmpfile(struct cachefiles_object *object)
448 {
449 	struct cachefiles_volume *volume = object->volume;
450 	struct cachefiles_cache *cache = volume->cache;
451 	const struct cred *saved_cred;
452 	struct dentry *fan = volume->fanout[(u8)object->cookie->key_hash];
453 	struct file *file;
454 	struct path path;
455 	uint64_t ni_size;
456 	long ret;
457 
458 
460 
461 	path.mnt = cache->mnt;
462 	ret = cachefiles_inject_write_error();
463 	if (ret == 0)
464 		path.dentry = vfs_tmpfile(&init_user_ns, fan, S_IFREG, O_RDWR);
465 	else
466 		path.dentry = ERR_PTR(ret);
467 	if (IS_ERR(path.dentry)) {
468 		trace_cachefiles_vfs_error(object, d_inode(fan), PTR_ERR(path.dentry),
469 					   cachefiles_trace_tmpfile_error);
470 		if (PTR_ERR(path.dentry) == -EIO)
471 			cachefiles_io_error_obj(object, "Failed to create tmpfile");
472 		file = ERR_CAST(path.dentry);
473 		goto out;
474 	}
475 
476 	trace_cachefiles_tmpfile(object, d_backing_inode(path.dentry));
477 
478 	if (!cachefiles_mark_inode_in_use(object, path.dentry)) {
479 		file = ERR_PTR(-EBUSY);
480 		goto out_dput;
481 	}
482 
483 	ret = cachefiles_ondemand_init_object(object);
484 	if (ret < 0) {
485 		file = ERR_PTR(ret);
486 		goto out_unuse;
487 	}
488 
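	/* Pre-size the file: round the netfs's object size up to the DIO
	 * block size and expand the tmpfile to that length. */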
489 	ni_size = object->cookie->object_size;
490 	ni_size = round_up(ni_size, CACHEFILES_DIO_BLOCK_SIZE);
491 
492 	if (ni_size > 0) {
493 		trace_cachefiles_trunc(object, d_backing_inode(path.dentry), 0, ni_size,
494 				       cachefiles_trunc_expand_tmpfile);
495 		ret = cachefiles_inject_write_error();
496 		if (ret == 0)
497 			ret = vfs_truncate(&path, ni_size);
498 		if (ret < 0) {
499 			trace_cachefiles_vfs_error(
500 				object, d_backing_inode(path.dentry), ret,
501 				cachefiles_trace_trunc_error);
502 			file = ERR_PTR(ret);
503 			goto out_unuse;
504 		}
505 	}
506 
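	/* Open a kernel-internal file onto the tmpfile's inode with the
	 * cache's credentials and O_DIRECT; the supplied path is recorded as
	 * f_path without a normal pathwalk-based open. */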
507 	file = open_with_fake_path(&path, O_RDWR | O_LARGEFILE | O_DIRECT,
508 				   d_backing_inode(path.dentry), cache->cache_cred);
509 	if (IS_ERR(file)) {
510 		trace_cachefiles_vfs_error(object, d_backing_inode(path.dentry),
511 					   PTR_ERR(file),
512 					   cachefiles_trace_open_error);
513 		goto out_unuse;
514 	}
515 	if (unlikely(!file->f_op->read_iter) ||
516 	    unlikely(!file->f_op->write_iter)) {
517 		fput(file);
518 		pr_notice("Cache does not support read_iter and write_iter\n");
519 		file = ERR_PTR(-EINVAL);
520 		goto out_unuse;
521 	}
522 
523 	goto out_dput;
524 
525 out_unuse:
526 	cachefiles_do_unmark_inode_in_use(object, path.dentry);
527 out_dput:
528 	dput(path.dentry);
529 out:
530 	cachefiles_end_secure(cache, saved_cred);
531 	return file;
532 }
533 
534 /*
535  * Create a new file.
536  */
537 static bool cachefiles_create_file(struct cachefiles_object *object)
538 {
539 	struct file *file;
540 	int ret;
541 
542 	ret = cachefiles_has_space(object->volume->cache, 1, 0,
543 				   cachefiles_has_space_for_create);
544 	if (ret < 0)
545 		return false;
546 
547 	file = cachefiles_create_tmpfile(object);
548 	if (IS_ERR(file))
549 		return false;
550 
551 	set_bit(FSCACHE_COOKIE_NEEDS_UPDATE, &object->cookie->flags);
552 	set_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags);
553 	_debug("create -> %pD{ino=%lu}", file, file_inode(file)->i_ino);
554 	object->file = file;
555 	return true;
556 }
557 
558 /*
559  * Open an existing file, checking its attributes and replacing it if it is
560  * stale.
561  */
562 static bool cachefiles_open_file(struct cachefiles_object *object,
563 				 struct dentry *dentry)
564 {
565 	struct cachefiles_cache *cache = object->volume->cache;
566 	struct file *file;
567 	struct path path;
568 	int ret;
569 
570 	_enter("%pd", dentry);
571 
572 	if (!cachefiles_mark_inode_in_use(object, dentry))
573 		return false;
574 
575 	/* We need to open a file interface onto a data file now as we can't do
576 	 * it on demand because writeback called from do_exit() sees
577 	 * current->fs == NULL - which breaks d_path() called from ext4 open.
578 	 */
579 	path.mnt = cache->mnt;
580 	path.dentry = dentry;
581 	file = open_with_fake_path(&path, O_RDWR | O_LARGEFILE | O_DIRECT,
582 				   d_backing_inode(dentry), cache->cache_cred);
583 	if (IS_ERR(file)) {
584 		trace_cachefiles_vfs_error(object, d_backing_inode(dentry),
585 					   PTR_ERR(file),
586 					   cachefiles_trace_open_error);
587 		goto error;
588 	}
589 
590 	if (unlikely(!file->f_op->read_iter) ||
591 	    unlikely(!file->f_op->write_iter)) {
592 		pr_notice("Cache does not support read_iter and write_iter\n");
593 		goto error_fput;
594 	}
595 	_debug("file -> %pd positive", dentry);
596 
597 	ret = cachefiles_ondemand_init_object(object);
598 	if (ret < 0)
599 		goto error_fput;
600 
601 	ret = cachefiles_check_auxdata(object, file);
602 	if (ret < 0)
603 		goto check_failed;
604 
605 	object->file = file;
606 
607 	/* Always update the atime on an object we've just looked up (this is
608 	 * used to keep track of culling, and atimes are only updated by read,
609 	 * write and readdir but not lookup or open).
610 	 */
611 	touch_atime(&file->f_path);
612 	dput(dentry);
613 	return true;
614 
615 check_failed:
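	/* The coherency data didn't match.  Tell fscache the lookup found
	 * nothing usable and, if the file was merely stale, start afresh with
	 * a new tmpfile. */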
616 	fscache_cookie_lookup_negative(object->cookie);
617 	cachefiles_unmark_inode_in_use(object, file);
618 	fput(file);
619 	dput(dentry);
620 	if (ret == -ESTALE)
621 		return cachefiles_create_file(object);
622 	return false;
623 
624 error_fput:
625 	fput(file);
626 error:
627 	cachefiles_do_unmark_inode_in_use(object, dentry);
628 	dput(dentry);
629 	return false;
630 }
631 
632 /*
633  * Look up the backing file for an object within its fanout directory in
634  * the backing filesystem, creating a new file if no usable one exists
635  */
636 bool cachefiles_look_up_object(struct cachefiles_object *object)
637 {
638 	struct cachefiles_volume *volume = object->volume;
639 	struct dentry *dentry, *fan = volume->fanout[(u8)object->cookie->key_hash];
640 	int ret;
641 
642 	_enter("OBJ%x,%s,", object->debug_id, object->d_name);
643 
644 	/* Look up path "cache/vol/fanout/file". */
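	/* The fanout directory is chosen by the bottom byte of the cookie's
	 * key hash, spreading objects across the volume's fanout
	 * subdirectories. */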
645 	ret = cachefiles_inject_read_error();
646 	if (ret == 0)
647 		dentry = lookup_positive_unlocked(object->d_name, fan,
648 						  object->d_name_len);
649 	else
650 		dentry = ERR_PTR(ret);
651 	trace_cachefiles_lookup(object, fan, dentry);
652 	if (IS_ERR(dentry)) {
653 		if (dentry == ERR_PTR(-ENOENT))
654 			goto new_file;
655 		if (dentry == ERR_PTR(-EIO))
656 			cachefiles_io_error_obj(object, "Lookup failed");
657 		return false;
658 	}
659 
660 	if (!d_is_reg(dentry)) {
661 		pr_err("%pd is not a file\n", dentry);
662 		inode_lock_nested(d_inode(fan), I_MUTEX_PARENT);
663 		ret = cachefiles_bury_object(volume->cache, object, fan, dentry,
664 					     FSCACHE_OBJECT_IS_WEIRD);
665 		dput(dentry);
666 		if (ret < 0)
667 			return false;
668 		goto new_file;
669 	}
670 
671 	if (!cachefiles_open_file(object, dentry))
672 		return false;
673 
674 	_leave(" = t [%lu]", file_inode(object->file)->i_ino);
675 	return true;
676 
677 new_file:
678 	fscache_cookie_lookup_negative(object->cookie);
679 	return cachefiles_create_file(object);
680 }
681 
682 /*
683  * Attempt to link a temporary file into its rightful place in the cache.
684  */
685 bool cachefiles_commit_tmpfile(struct cachefiles_cache *cache,
686 			       struct cachefiles_object *object)
687 {
688 	struct cachefiles_volume *volume = object->volume;
689 	struct dentry *dentry, *fan = volume->fanout[(u8)object->cookie->key_hash];
690 	bool success = false;
691 	int ret;
692 
693 	_enter(",%pD", object->file);
694 
695 	inode_lock_nested(d_inode(fan), I_MUTEX_PARENT);
696 	ret = cachefiles_inject_read_error();
697 	if (ret == 0)
698 		dentry = lookup_one_len(object->d_name, fan, object->d_name_len);
699 	else
700 		dentry = ERR_PTR(ret);
701 	if (IS_ERR(dentry)) {
702 		trace_cachefiles_vfs_error(object, d_inode(fan), PTR_ERR(dentry),
703 					   cachefiles_trace_lookup_error);
704 		_debug("lookup fail %ld", PTR_ERR(dentry));
705 		goto out_unlock;
706 	}
707 
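	/* If something already exists under the target name, it may already be
	 * the file we're committing (nothing to do) or a stale object that
	 * must be unlinked and the name looked up afresh. */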
708 	if (!d_is_negative(dentry)) {
709 		if (d_backing_inode(dentry) == file_inode(object->file)) {
710 			success = true;
711 			goto out_dput;
712 		}
713 
714 		ret = cachefiles_unlink(volume->cache, object, fan, dentry,
715 					FSCACHE_OBJECT_IS_STALE);
716 		if (ret < 0)
717 			goto out_dput;
718 
719 		dput(dentry);
720 		ret = cachefiles_inject_read_error();
721 		if (ret == 0)
722 			dentry = lookup_one_len(object->d_name, fan, object->d_name_len);
723 		else
724 			dentry = ERR_PTR(ret);
725 		if (IS_ERR(dentry)) {
726 			trace_cachefiles_vfs_error(object, d_inode(fan), PTR_ERR(dentry),
727 						   cachefiles_trace_lookup_error);
728 			_debug("lookup fail %ld", PTR_ERR(dentry));
729 			goto out_unlock;
730 		}
731 	}
732 
733 	ret = cachefiles_inject_read_error();
734 	if (ret == 0)
735 		ret = vfs_link(object->file->f_path.dentry, &init_user_ns,
736 			       d_inode(fan), dentry, NULL);
737 	if (ret < 0) {
738 		trace_cachefiles_vfs_error(object, d_inode(fan), ret,
739 					   cachefiles_trace_link_error);
740 		_debug("link fail %d", ret);
741 	} else {
742 		trace_cachefiles_link(object, file_inode(object->file));
743 		spin_lock(&object->lock);
744 		/* TODO: Do we want to switch the file pointer to the new dentry? */
745 		clear_bit(CACHEFILES_OBJECT_USING_TMPFILE, &object->flags);
746 		spin_unlock(&object->lock);
747 		success = true;
748 	}
749 
750 out_dput:
751 	dput(dentry);
752 out_unlock:
753 	inode_unlock(d_inode(fan));
754 	_leave(" = %u", success);
755 	return success;
756 }
757 
758 /*
759  * Look up an inode to be checked or culled.  Return -EBUSY if the inode is
760  * marked in use.
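 *
 * On success the parent directory is left locked for the caller to drop (or
 * to hand on to cachefiles_bury_object(), which drops it).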
761  */
762 static struct dentry *cachefiles_lookup_for_cull(struct cachefiles_cache *cache,
763 						 struct dentry *dir,
764 						 char *filename)
765 {
766 	struct dentry *victim;
767 	int ret = -ENOENT;
768 
769 	inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
770 
771 	victim = lookup_one_len(filename, dir, strlen(filename));
772 	if (IS_ERR(victim))
773 		goto lookup_error;
774 	if (d_is_negative(victim))
775 		goto lookup_put;
776 	if (d_inode(victim)->i_flags & S_KERNEL_FILE)
777 		goto lookup_busy;
778 	return victim;
779 
780 lookup_busy:
781 	ret = -EBUSY;
782 lookup_put:
783 	inode_unlock(d_inode(dir));
784 	dput(victim);
785 	return ERR_PTR(ret);
786 
787 lookup_error:
788 	inode_unlock(d_inode(dir));
789 	ret = PTR_ERR(victim);
790 	if (ret == -ENOENT)
791 		return ERR_PTR(-ESTALE); /* Probably got retired by the netfs */
792 
793 	if (ret == -EIO) {
794 		cachefiles_io_error(cache, "Lookup failed");
795 	} else if (ret != -ENOMEM) {
796 		pr_err("Internal error: %d\n", ret);
797 		ret = -EIO;
798 	}
799 
800 	return ERR_PTR(ret);
801 }
802 
803 /*
804  * Cull an object if it's not in use
805  * - called only by cache manager daemon
806  */
807 int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
808 		    char *filename)
809 {
810 	struct dentry *victim;
811 	struct inode *inode;
812 	int ret;
813 
814 	_enter(",%pd/,%s", dir, filename);
815 
816 	victim = cachefiles_lookup_for_cull(cache, dir, filename);
817 	if (IS_ERR(victim))
818 		return PTR_ERR(victim);
819 
820 	/* check to see if someone is using this object */
821 	inode = d_inode(victim);
822 	inode_lock(inode);
823 	if (inode->i_flags & S_KERNEL_FILE) {
824 		ret = -EBUSY;
825 	} else {
826 		/* Stop the cache from picking it back up */
827 		inode->i_flags |= S_KERNEL_FILE;
828 		ret = 0;
829 	}
830 	inode_unlock(inode);
831 	if (ret < 0)
832 		goto error_unlock;
833 
834 	ret = cachefiles_bury_object(cache, NULL, dir, victim,
835 				     FSCACHE_OBJECT_WAS_CULLED);
836 	if (ret < 0)
837 		goto error;
838 
839 	fscache_count_culled();
840 	dput(victim);
841 	_leave(" = 0");
842 	return 0;
843 
844 error_unlock:
845 	inode_unlock(d_inode(dir));
846 error:
847 	dput(victim);
848 	if (ret == -ENOENT)
849 		return -ESTALE; /* Probably got retired by the netfs */
850 
851 	if (ret != -ENOMEM) {
852 		pr_err("Internal error: %d\n", ret);
853 		ret = -EIO;
854 	}
855 
856 	_leave(" = %d", ret);
857 	return ret;
858 }
859 
860 /*
861  * Find out if an object is in use or not
862  * - called only by cache manager daemon
863  * - returns -EBUSY or 0 to indicate whether an object is in use or not
864  */
865 int cachefiles_check_in_use(struct cachefiles_cache *cache, struct dentry *dir,
866 			    char *filename)
867 {
868 	struct dentry *victim;
869 	int ret = 0;
870 
871 	victim = cachefiles_lookup_for_cull(cache, dir, filename);
872 	if (IS_ERR(victim))
873 		return PTR_ERR(victim);
874 
875 	inode_unlock(d_inode(dir));
876 	dput(victim);
877 	return ret;
878 }
879