/* fs/xfs/xfs_inode.c (revision 663ea69540c8d7ba332c9a3129d7f3cf5de50d9b) */
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include <linux/iversion.h>

#include "xfs_platform.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_attr.h"
#include "xfs_bit.h"
#include "xfs_trans_space.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_iunlink_item.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
#include "xfs_ag.h"
#include "xfs_log_priv.h"
#include "xfs_health.h"
#include "xfs_pnfs.h"
#include "xfs_parent.h"
#include "xfs_xattr.h"
#include "xfs_inode_util.h"
#include "xfs_metafile.h"

struct kmem_cache *xfs_inode_cache;

/*
 * These two are wrapper routines around the xfs_ilock() routine used to
 * centralize some grungy code.  They are used in places that wish to lock the
 * inode solely for reading the extents.  The reason these places can't just
 * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards the
 * bringing in of the extents from disk for a file in b-tree format.  If the
 * inode is in b-tree format, then we need to lock the inode exclusively until
 * the extents are read in.  Locking it exclusively all the time would limit
 * our parallelism unnecessarily, though.  What we do instead is check to see
 * if the extents have been read in yet, and only lock the inode exclusively
 * if they have not.
 *
 * The functions return a value which should be given to the corresponding
 * xfs_iunlock() call.
 */
uint
xfs_ilock_data_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (xfs_need_iread_extents(&ip->i_df))
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}

uint
xfs_ilock_attr_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (xfs_inode_has_attr_fork(ip) && xfs_need_iread_extents(&ip->i_af))
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}

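/*
 * Illustrative sketch (editor's addition, compiled out; not part of the
 * kernel source): the value returned by the wrappers above must be handed
 * back to xfs_iunlock() so that whichever lock mode was actually taken is
 * the one that gets dropped.  The helper name is hypothetical.
 */
#if 0
static int
example_read_data_extents(
	struct xfs_inode	*ip)
{
	uint			lock_mode;

	/* May silently upgrade to ILOCK_EXCL if extents must be read in. */
	lock_mode = xfs_ilock_data_map_shared(ip);

	/* ... walk the data fork extent list here ... */

	xfs_iunlock(ip, lock_mode);
	return 0;
}
#endif
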
/*
 * You can't set both SHARED and EXCL for the same lock,
 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_MMAPLOCK_SHARED,
 * XFS_MMAPLOCK_EXCL, XFS_ILOCK_SHARED, XFS_ILOCK_EXCL are valid values
 * to set in lock_flags.
 */
static inline void
xfs_lock_flags_assert(
	uint		lock_flags)
{
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
		(XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
		(XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
		(XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
	ASSERT(lock_flags != 0);
}

/*
 * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
 * multi-reader locks: invalidate_lock and the i_lock.  This routine allows
 * various combinations of the locks to be obtained.
 *
 * The 3 locks should always be ordered so that the IO lock is obtained first,
 * the mmap lock second and the ilock last in order to prevent deadlock.
 *
 * Basic locking order:
 *
 * i_rwsem -> invalidate_lock -> page_lock -> i_ilock
 *
 * mmap_lock locking order:
 *
 * i_rwsem -> page lock -> mmap_lock
 * mmap_lock -> invalidate_lock -> page_lock
 *
 * The difference in mmap_lock locking order means that we cannot hold the
 * invalidate_lock over syscall based read(2)/write(2) based IO. These IO paths
 * can fault in pages during copy in/out (for buffered IO) or require the
 * mmap_lock in get_user_pages() to map the user pages into the kernel address
 * space for direct IO. Similarly the i_rwsem cannot be taken inside a page
 * fault because page faults already hold the mmap_lock.
 *
 * Hence to serialise fully against both syscall and mmap based IO, we need to
 * take both the i_rwsem and the invalidate_lock. These locks should *only* be
 * both taken in places where we need to invalidate the page cache in a race
 * free manner (e.g. truncate, hole punch and other extent manipulation
 * functions).
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock(ip, lock_flags, _RET_IP_);

	xfs_lock_flags_assert(lock_flags);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		down_write_nested(&VFS_I(ip)->i_rwsem,
				  XFS_IOLOCK_DEP(lock_flags));
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		down_read_nested(&VFS_I(ip)->i_rwsem,
				 XFS_IOLOCK_DEP(lock_flags));
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL) {
		down_write_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
				  XFS_MMAPLOCK_DEP(lock_flags));
	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
		down_read_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
				 XFS_MMAPLOCK_DEP(lock_flags));
	}

	if (lock_flags & XFS_ILOCK_EXCL)
		down_write_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		down_read_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
}

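/*
 * Illustrative sketch (editor's addition, compiled out): to invalidate the
 * page cache in a race-free manner, a caller takes both the IO lock and the
 * mmap invalidate lock in the order documented above.  The helper name is
 * hypothetical.
 */
#if 0
static void
example_lock_for_invalidation(
	struct xfs_inode	*ip)
{
	/* i_rwsem first, then invalidate_lock, per the locking order above */
	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);

	/* ... truncate/hole punch the page cache here ... */

	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
}
#endif
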
/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked.  See the comment for xfs_ilock() for a list
 *	 of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);

	xfs_lock_flags_assert(lock_flags);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
			goto out;
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL) {
		if (!down_write_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
		if (!down_read_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
			goto out_undo_iolock;
	}

	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!down_write_trylock(&ip->i_lock))
			goto out_undo_mmaplock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!down_read_trylock(&ip->i_lock))
			goto out_undo_mmaplock;
	}
	return 1;

out_undo_mmaplock:
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);
out:
	return 0;
}

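/*
 * Illustrative sketch (editor's addition, compiled out): a caller that must
 * not block tries the non-blocking variant and defers the work on
 * contention.  The helper name is hypothetical.
 */
#if 0
static bool
example_try_ilock_or_defer(
	struct xfs_inode	*ip)
{
	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
		return false;		/* requeue and retry later */

	/* ... modify the inode core here ... */

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return true;
}
#endif
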
/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be unlocked.  See the comment for xfs_ilock() for a list
 *	 of valid values for this parameter.
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	xfs_lock_flags_assert(lock_flags);

	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);

	if (lock_flags & XFS_MMAPLOCK_EXCL)
		up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		up_read(&VFS_I(ip)->i_mapping->invalidate_lock);

	if (lock_flags & XFS_ILOCK_EXCL)
		up_write(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		up_read(&ip->i_lock);

	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}

/*
 * Give up write locks.  The i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags &
		~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		downgrade_write(&ip->i_lock);
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		downgrade_write(&VFS_I(ip)->i_mapping->invalidate_lock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		downgrade_write(&VFS_I(ip)->i_rwsem);

	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}

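/*
 * Illustrative sketch (editor's addition, compiled out): take a lock
 * exclusively for one-off setup, then demote it so concurrent readers can
 * proceed.  Note the unlock must then pass the SHARED flag.  The helper
 * name is hypothetical.
 */
#if 0
static void
example_setup_then_demote(
	struct xfs_inode	*ip)
{
	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	/* ... one-time exclusive setup ... */

	xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);

	/* ... work that only needs shared access ... */

	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
}
#endif
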
void
xfs_assert_ilocked(
	struct xfs_inode	*ip,
	uint			lock_flags)
{
	/*
	 * Sometimes we assert the ILOCK is held exclusively, but we're in
	 * a workqueue, so lockdep doesn't know we're the owner.
	 */
	if (lock_flags & XFS_ILOCK_SHARED)
		rwsem_assert_held(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_EXCL)
		rwsem_assert_held_write_nolockdep(&ip->i_lock);

	if (lock_flags & XFS_MMAPLOCK_SHARED)
		rwsem_assert_held(&VFS_I(ip)->i_mapping->invalidate_lock);
	else if (lock_flags & XFS_MMAPLOCK_EXCL)
		rwsem_assert_held_write(&VFS_I(ip)->i_mapping->invalidate_lock);

	if (lock_flags & XFS_IOLOCK_SHARED)
		rwsem_assert_held(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_EXCL)
		rwsem_assert_held_write(&VFS_I(ip)->i_rwsem);
}

/*
 * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
 * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
 * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
 * errors and warnings.
 */
#if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
static bool
xfs_lockdep_subclass_ok(
	int subclass)
{
	return subclass < MAX_LOCKDEP_SUBCLASSES;
}
#else
#define xfs_lockdep_subclass_ok(subclass)	(true)
#endif

/*
 * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
 * value. This can be called for any type of inode lock combination, including
 * parent locking. Care must be taken to ensure we don't overrun the subclass
 * storage fields in the class mask we build.
 */
static inline uint
xfs_lock_inumorder(
	uint	lock_mode,
	uint	subclass)
{
	uint	class = 0;

	ASSERT(!(lock_mode & XFS_ILOCK_PARENT));
	ASSERT(xfs_lockdep_subclass_ok(subclass));

	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
		class += subclass << XFS_IOLOCK_SHIFT;
	}

	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
		ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
		class += subclass << XFS_MMAPLOCK_SHIFT;
	}

	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
		ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
		class += subclass << XFS_ILOCK_SHIFT;
	}

	return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
}

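/*
 * Worked expansion (editor's addition): for the i'th inode of a multi-inode
 * lock, the loop index becomes the lockdep subclass.  For example, with
 * i == 2:
 *
 *	xfs_lock_inumorder(XFS_ILOCK_EXCL, 2)
 *		== XFS_ILOCK_EXCL | (2 << XFS_ILOCK_SHIFT)
 *
 * so lockdep sees each i_lock in the set as a distinct subclass and does
 * not flag the deliberate nesting as a self-deadlock.
 */
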
/*
 * The following routine will lock n inodes in exclusive mode.  We assume the
 * caller calls us with the inodes in i_ino order.
 *
 * We need to detect deadlock where an inode that we lock is in the AIL and we
 * start waiting for another inode that is locked by a thread in a long running
 * transaction (such as truncate). This can result in deadlock since the long
 * running trans might need to wait for the inode we just locked in order to
 * push the tail and free space in the log.
 *
 * xfs_lock_inodes() can only be used to lock one type of lock at a time -
 * the iolock, the mmaplock or the ilock - never more than one type at once.
 * If we lock more than one type at a time, lockdep will report false
 * positives saying we have violated locking orders.
 */
void
xfs_lock_inodes(
	struct xfs_inode	**ips,
	int			inodes,
	uint			lock_mode)
{
	int			attempts = 0;
	uint			i;
	int			j;
	bool			try_lock;
	struct xfs_log_item	*lp;

	/*
	 * Currently supports between 2 and 5 inodes with exclusive locking.  We
	 * support an arbitrary depth of locking here, but absolute limits on
	 * inodes depend on the type of locking and the limits placed by
	 * lockdep annotations in xfs_lock_inumorder.  These are all checked by
	 * the asserts.
	 */
	ASSERT(ips && inodes >= 2 && inodes <= 5);
	ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
			    XFS_ILOCK_EXCL));
	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
			      XFS_ILOCK_SHARED)));
	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
		inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);

	if (lock_mode & XFS_IOLOCK_EXCL) {
		ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
	} else if (lock_mode & XFS_MMAPLOCK_EXCL)
		ASSERT(!(lock_mode & XFS_ILOCK_EXCL));

again:
	try_lock = false;
	i = 0;
	for (; i < inodes; i++) {
		ASSERT(ips[i]);

		if (i && (ips[i] == ips[i - 1]))	/* Already locked */
			continue;

		/*
		 * If try_lock is not set yet, make sure all locked inodes are
		 * not in the AIL.  If any are, set try_lock to be used later.
		 */
		if (!try_lock) {
			for (j = (i - 1); j >= 0 && !try_lock; j--) {
				lp = &ips[j]->i_itemp->ili_item;
				if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags))
					try_lock = true;
			}
		}

		/*
		 * If any of the previous locks we have locked is in the AIL,
		 * we must TRY to get the second and subsequent locks. If
		 * we can't get any, we must release all we have
		 * and try again.
		 */
		if (!try_lock) {
			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
			continue;
		}

		/* try_lock means we have an inode locked that is in the AIL. */
		ASSERT(i != 0);
		if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
			continue;

		/*
		 * Unlock all previous guys and try again.  xfs_iunlock will try
		 * to push the tail if the inode is in the AIL.
		 */
		attempts++;
		for (j = i - 1; j >= 0; j--) {
			/*
			 * Check to see if we've already unlocked this one.  Not
			 * the first one going back, and the inode ptr is the
			 * same.
			 */
			if (j != (i - 1) && ips[j] == ips[j + 1])
				continue;

			xfs_iunlock(ips[j], lock_mode);
		}

		if ((attempts % 5) == 0) {
			delay(1); /* Don't just spin the CPU */
		}
		goto again;
	}
}

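/*
 * Illustrative sketch (editor's addition, compiled out): callers sort the
 * inode pointer array by i_ino before handing it to xfs_lock_inodes().
 * The helper name and the three-inode arrangement are hypothetical.
 */
#if 0
static void
example_lock_three(
	struct xfs_inode	*a,	/* a->i_ino < b->i_ino < c->i_ino */
	struct xfs_inode	*b,
	struct xfs_inode	*c)
{
	struct xfs_inode	*ips[3] = { a, b, c };

	xfs_lock_inodes(ips, 3, XFS_ILOCK_EXCL);

	/* ... multi-inode update ... */

	xfs_iunlock(c, XFS_ILOCK_EXCL);
	xfs_iunlock(b, XFS_ILOCK_EXCL);
	xfs_iunlock(a, XFS_ILOCK_EXCL);
}
#endif
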
/*
 * xfs_lock_two_inodes() can only be used to lock ilock. The iolock and
 * mmaplock must be double-locked separately since we use i_rwsem and
 * invalidate_lock for that. We now support taking one lock EXCL and the
 * other SHARED.
 */
void
xfs_lock_two_inodes(
	struct xfs_inode	*ip0,
	uint			ip0_mode,
	struct xfs_inode	*ip1,
	uint			ip1_mode)
{
	int			attempts = 0;
	struct xfs_log_item	*lp;

	ASSERT(hweight32(ip0_mode) == 1);
	ASSERT(hweight32(ip1_mode) == 1);
	ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
	ASSERT(ip0->i_ino != ip1->i_ino);

	if (ip0->i_ino > ip1->i_ino) {
		swap(ip0, ip1);
		swap(ip0_mode, ip1_mode);
	}

 again:
	xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));

	/*
	 * If the first lock we have locked is in the AIL, we must TRY to get
	 * the second lock. If we can't get it, we must release the first one
	 * and try again.
	 */
	lp = &ip0->i_itemp->ili_item;
	if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
			xfs_iunlock(ip0, ip0_mode);
			if ((++attempts % 5) == 0)
				delay(1); /* Don't just spin the CPU */
			goto again;
		}
	} else {
		xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
	}
}

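/*
 * Illustrative sketch (editor's addition, compiled out): one ilock may be
 * taken EXCL and the other SHARED; xfs_lock_two_inodes() reorders by inode
 * number itself, so the caller does not have to.  The helper name is
 * hypothetical; the inodes must be distinct.
 */
#if 0
static void
example_lock_src_and_tgt(
	struct xfs_inode	*src,
	struct xfs_inode	*tgt)
{
	xfs_lock_two_inodes(src, XFS_ILOCK_SHARED, tgt, XFS_ILOCK_EXCL);

	/* ... read from src while modifying tgt ... */

	xfs_iunlock(tgt, XFS_ILOCK_EXCL);
	xfs_iunlock(src, XFS_ILOCK_SHARED);
}
#endif
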
/*
 * Looks up an inode from "name". If ci_name is not NULL, then a CI match
 * is allowed, otherwise it has to be an exact match. If a CI match is found,
 * ci_name->name will point to the actual name (caller must free) or
 * will be set to NULL if an exact match is found.
 */
int
xfs_lookup(
	struct xfs_inode	*dp,
	const struct xfs_name	*name,
	struct xfs_inode	**ipp,
	struct xfs_name		*ci_name)
{
	xfs_ino_t		inum;
	int			error;

	trace_xfs_lookup(dp, name);

	if (xfs_is_shutdown(dp->i_mount))
		return -EIO;
	if (xfs_ifork_zapped(dp, XFS_DATA_FORK))
		return -EIO;

	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
	if (error)
		goto out_unlock;

	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
	if (error)
		goto out_free_name;

	/*
	 * Fail if a directory entry in the regular directory tree points to
	 * a metadata file.
	 */
	if (XFS_IS_CORRUPT(dp->i_mount, xfs_is_metadir_inode(*ipp))) {
		xfs_fs_mark_sick(dp->i_mount, XFS_SICK_FS_METADIR);
		error = -EFSCORRUPTED;
		goto out_irele;
	}

	return 0;

out_irele:
	xfs_irele(*ipp);
out_free_name:
	if (ci_name)
		kfree(ci_name->name);
out_unlock:
	*ipp = NULL;
	return error;
}

/*
 * Initialise a newly allocated inode and return the in-core inode to the
 * caller locked exclusively.
 *
 * Caller is responsible for unlocking the inode manually upon return
 */
int
xfs_icreate(
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	const struct xfs_icreate_args *args,
	struct xfs_inode	**ipp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_inode	*ip = NULL;
	int			error;

	/*
	 * Get the in-core inode with the lock held exclusively to prevent
	 * others from looking at it until we're done.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
	if (error)
		return error;

	ASSERT(ip != NULL);
	xfs_trans_ijoin(tp, ip, 0);
	xfs_inode_init(tp, args, ip);

	/* now that we have an i_mode we can setup the inode structure */
	xfs_setup_inode(ip);

	*ipp = ip;
	return 0;
}

/* Return dquots for the ids that will be assigned to a new file. */
int
xfs_icreate_dqalloc(
	const struct xfs_icreate_args	*args,
	struct xfs_dquot		**udqpp,
	struct xfs_dquot		**gdqpp,
	struct xfs_dquot		**pdqpp)
{
	struct inode			*dir = VFS_I(args->pip);
	kuid_t				uid = GLOBAL_ROOT_UID;
	kgid_t				gid = GLOBAL_ROOT_GID;
	prid_t				prid = 0;
	unsigned int			flags = XFS_QMOPT_QUOTALL;

	if (args->idmap) {
		/*
		 * The uid/gid computation code must match what the VFS uses to
		 * assign i_[ug]id.  INHERIT adjusts the gid computation for
		 * setgid/grpid systems.
		 */
		uid = mapped_fsuid(args->idmap, i_user_ns(dir));
		gid = mapped_fsgid(args->idmap, i_user_ns(dir));
		prid = xfs_get_initial_prid(args->pip);
		flags |= XFS_QMOPT_INHERIT;
	}

	*udqpp = *gdqpp = *pdqpp = NULL;

	return xfs_qm_vop_dqalloc(args->pip, uid, gid, prid, flags, udqpp,
			gdqpp, pdqpp);
}

int
xfs_create(
	const struct xfs_icreate_args *args,
	struct xfs_name		*name,
	struct xfs_inode	**ipp)
{
	struct xfs_inode	*dp = args->pip;
	struct xfs_dir_update	du = {
		.dp		= dp,
		.name		= name,
	};
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_trans	*tp = NULL;
	struct xfs_dquot	*udqp;
	struct xfs_dquot	*gdqp;
	struct xfs_dquot	*pdqp;
	struct xfs_trans_res	*tres;
	xfs_ino_t		ino;
	bool			unlock_dp_on_error = false;
	bool			is_dir = S_ISDIR(args->mode);
	uint			resblks;
	int			error;

	trace_xfs_create(dp, name);

	if (xfs_is_shutdown(mp))
		return -EIO;
	if (xfs_ifork_zapped(dp, XFS_DATA_FORK))
		return -EIO;

	/* Make sure that we have allocated dquot(s) on disk. */
	error = xfs_icreate_dqalloc(args, &udqp, &gdqp, &pdqp);
	if (error)
		return error;

	if (is_dir) {
		resblks = xfs_mkdir_space_res(mp, name->len);
		tres = &M_RES(mp)->tr_mkdir;
	} else {
		resblks = xfs_create_space_res(mp, name->len);
		tres = &M_RES(mp)->tr_create;
	}

	error = xfs_parent_start(mp, &du.ppargs);
	if (error)
		goto out_release_dquots;

	/*
	 * Initially assume that the file does not exist and
	 * reserve the resources for that case.  If that is not
	 * the case we'll drop the one we have and get a more
	 * appropriate transaction later.
	 */
	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
			&tp);
	if (error)
		goto out_parent;

	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
	unlock_dp_on_error = true;

	/*
	 * A newly created regular or special file just has one directory
	 * entry pointing to them, but a directory also has the "." entry
	 * pointing to itself.
	 */
	error = xfs_dialloc(&tp, args, &ino);
	if (!error)
		error = xfs_icreate(tp, ino, args, &du.ip);
	if (error)
		goto out_trans_cancel;

	/*
	 * Now we join the directory inode to the transaction.  We do not do it
	 * earlier because xfs_dialloc might commit the previous transaction
	 * (and release all the locks).  An error from here on will result in
	 * the transaction cancel unlocking dp so don't do it explicitly in the
	 * error path.
	 */
	xfs_trans_ijoin(tp, dp, 0);

	error = xfs_dir_create_child(tp, resblks, &du);
	if (error)
		goto out_trans_cancel;

	/*
	 * If this is a synchronous mount, make sure that the
	 * create transaction goes to disk before returning to
	 * the user.
	 */
	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
		xfs_trans_set_sync(tp);

	/*
	 * Attach the dquot(s) to the inodes and modify them incore.
	 * The ids of the inode couldn't have changed since the new
	 * inode has been locked ever since it was created.
	 */
	xfs_qm_vop_create_dqattach(tp, du.ip, udqp, gdqp, pdqp);

	error = xfs_trans_commit(tp);
	if (error)
		goto out_release_inode;

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	*ipp = du.ip;
	xfs_iunlock(du.ip, XFS_ILOCK_EXCL);
	xfs_iunlock(dp, XFS_ILOCK_EXCL);
	xfs_parent_finish(mp, du.ppargs);
	return 0;

 out_trans_cancel:
	xfs_trans_cancel(tp);
 out_release_inode:
	/*
	 * Wait until after the current transaction is aborted to finish the
	 * setup of the inode and release the inode.  This prevents recursive
	 * transactions and deadlocks from xfs_inactive.
	 */
	if (du.ip) {
		xfs_iunlock(du.ip, XFS_ILOCK_EXCL);
		xfs_finish_inode_setup(du.ip);
		xfs_irele(du.ip);
	}
 out_parent:
	xfs_parent_finish(mp, du.ppargs);
 out_release_dquots:
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	if (unlock_dp_on_error)
		xfs_iunlock(dp, XFS_ILOCK_EXCL);
	return error;
}

int
xfs_create_tmpfile(
	const struct xfs_icreate_args *args,
	struct xfs_inode	**ipp)
{
	struct xfs_inode	*dp = args->pip;
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_inode	*ip = NULL;
	struct xfs_trans	*tp = NULL;
	struct xfs_dquot	*udqp;
	struct xfs_dquot	*gdqp;
	struct xfs_dquot	*pdqp;
	struct xfs_trans_res	*tres;
	xfs_ino_t		ino;
	uint			resblks;
	int			error;

	ASSERT(args->flags & XFS_ICREATE_TMPFILE);

	if (xfs_is_shutdown(mp))
		return -EIO;

	/* Make sure that we have allocated dquot(s) on disk. */
	error = xfs_icreate_dqalloc(args, &udqp, &gdqp, &pdqp);
	if (error)
		return error;

	resblks = XFS_IALLOC_SPACE_RES(mp);
	tres = &M_RES(mp)->tr_create_tmpfile;

	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
			&tp);
	if (error)
		goto out_release_dquots;

	error = xfs_dialloc(&tp, args, &ino);
	if (!error)
		error = xfs_icreate(tp, ino, args, &ip);
	if (error)
		goto out_trans_cancel;

	if (xfs_has_wsync(mp))
		xfs_trans_set_sync(tp);

	/*
	 * Attach the dquot(s) to the inodes and modify them incore.
	 * The ids of the inode couldn't have changed since the new
	 * inode has been locked ever since it was created.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	error = xfs_iunlink(tp, ip);
	if (error)
		goto out_trans_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out_release_inode;

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	*ipp = ip;
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;

 out_trans_cancel:
	xfs_trans_cancel(tp);
 out_release_inode:
	/*
	 * Wait until after the current transaction is aborted to finish the
	 * setup of the inode and release the inode.  This prevents recursive
	 * transactions and deadlocks from xfs_inactive.
	 */
	if (ip) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		xfs_finish_inode_setup(ip);
		xfs_irele(ip);
	}
 out_release_dquots:
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	return error;
}

static inline int
xfs_projid_differ(
	struct xfs_inode	*tdp,
	struct xfs_inode	*sip)
{
	/*
	 * If we are using project inheritance, we only allow hard link/rename
	 * creation in our tree when the project IDs are the same; else
	 * the tree quota mechanism could be circumvented.
	 */
	if (unlikely((tdp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
		     tdp->i_projid != sip->i_projid)) {
		/*
		 * Project quota setup skips special files which can
		 * leave inodes in a PROJINHERIT directory without a
		 * project ID set. We need to allow links to be made
		 * to these "project-less" inodes because userspace
		 * expects them to succeed after project ID setup,
		 * but everything else should be rejected.
		 */
		if (!special_file(VFS_I(sip)->i_mode) ||
		    sip->i_projid != 0) {
			return -EXDEV;
		}
	}

	return 0;
}

int
xfs_link(
	struct xfs_inode	*tdp,
	struct xfs_inode	*sip,
	struct xfs_name		*target_name)
{
	struct xfs_dir_update	du = {
		.dp		= tdp,
		.name		= target_name,
		.ip		= sip,
	};
	struct xfs_mount	*mp = tdp->i_mount;
	struct xfs_trans	*tp;
	int			error, nospace_error = 0;
	int			resblks;

	trace_xfs_link(tdp, target_name);

	ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));

	if (xfs_is_shutdown(mp))
		return -EIO;
	if (xfs_ifork_zapped(tdp, XFS_DATA_FORK))
		return -EIO;

	error = xfs_qm_dqattach(sip);
	if (error)
		goto std_return;

	error = xfs_qm_dqattach(tdp);
	if (error)
		goto std_return;

	error = xfs_parent_start(mp, &du.ppargs);
	if (error)
		goto std_return;

	resblks = xfs_link_space_res(mp, target_name->len);
	error = xfs_trans_alloc_dir(tdp, &M_RES(mp)->tr_link, sip, &resblks,
			&tp, &nospace_error);
	if (error)
		goto out_parent;

	/*
	 * We don't allow reservationless or quotaless hardlinking when parent
	 * pointers are enabled because we can't back out if the xattrs must
	 * grow.
	 */
	if (du.ppargs && nospace_error) {
		error = nospace_error;
		goto error_return;
	}

	error = xfs_projid_differ(tdp, sip);
	if (error)
		goto error_return;

	error = xfs_dir_add_child(tp, resblks, &du);
	if (error)
		goto error_return;

	/*
	 * If this is a synchronous mount, make sure that the
	 * link transaction goes to disk before returning to
	 * the user.
	 */
	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
		xfs_trans_set_sync(tp);

	error = xfs_trans_commit(tp);
	xfs_iunlock(tdp, XFS_ILOCK_EXCL);
	xfs_iunlock(sip, XFS_ILOCK_EXCL);
	xfs_parent_finish(mp, du.ppargs);
	return error;

 error_return:
	xfs_trans_cancel(tp);
	xfs_iunlock(tdp, XFS_ILOCK_EXCL);
	xfs_iunlock(sip, XFS_ILOCK_EXCL);
 out_parent:
	xfs_parent_finish(mp, du.ppargs);
 std_return:
	if (error == -ENOSPC && nospace_error)
		error = nospace_error;
	return error;
}

/* Clear the reflink flag and the cowblocks tag if possible. */
static void
xfs_itruncate_clear_reflink_flags(
	struct xfs_inode	*ip)
{
	struct xfs_ifork	*dfork;
	struct xfs_ifork	*cfork;

	if (!xfs_is_reflink_inode(ip))
		return;
	dfork = xfs_ifork_ptr(ip, XFS_DATA_FORK);
	cfork = xfs_ifork_ptr(ip, XFS_COW_FORK);
	if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
		ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
	if (cfork->if_bytes == 0)
		xfs_inode_clear_cowblocks_tag(ip);
}

/*
 * Free up the underlying blocks past new_size.  The new size must be smaller
 * than the current size.  This routine can be used both for the attribute and
 * data fork, and does not modify the inode size, which is left to the caller.
 *
 * The transaction passed to this routine must have made a permanent log
 * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
 * given transaction and start new ones, so make sure everything involved in
 * the transaction is tidy before calling here.  Some transaction will be
 * returned to the caller to be committed.  The incoming transaction must
 * already include the inode, and both inode locks must be held exclusively.
 * The inode must also be "held" within the transaction.  On return the inode
 * will be "held" within the returned transaction.  This routine does NOT
 * require any disk space to be reserved for it within the transaction.
 *
 * If we get an error, we must return with the inode locked and linked into the
 * current transaction. This keeps things simple for the higher level code,
 * because it always knows that the inode is locked and held in the transaction
 * that returns to it whether errors occur or not.  We don't mark the inode
 * dirty on error so that transactions can be easily aborted if possible.
 */
int
xfs_itruncate_extents_flags(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fsize_t		new_size,
	int			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp = *tpp;
	xfs_fileoff_t		first_unmap_block;
	int			error = 0;

	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
	if (icount_read(VFS_I(ip)))
		xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL);
	if (whichfork == XFS_DATA_FORK)
		ASSERT(new_size <= XFS_ISIZE(ip));
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(ip->i_itemp != NULL);
	ASSERT(ip->i_itemp->ili_lock_flags == 0);
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	trace_xfs_itruncate_extents_start(ip, new_size);

	flags |= xfs_bmapi_aflag(whichfork);

	/*
	 * Since it is possible for space to become allocated beyond
	 * the end of the file (in a crash where the space is allocated
	 * but the inode size is not yet updated), simply remove any
	 * blocks which show up between the new EOF and the maximum
	 * possible file size.
	 *
	 * We have to free all the blocks to the bmbt maximum offset, even if
	 * the page cache can't scale that far.
	 */
	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	if (!xfs_verify_fileoff(mp, first_unmap_block)) {
		WARN_ON_ONCE(first_unmap_block > XFS_MAX_FILEOFF);
		return 0;
	}

	error = xfs_bunmapi_range(&tp, ip, flags, first_unmap_block,
			XFS_MAX_FILEOFF);
	if (error)
		goto out;

	if (whichfork == XFS_DATA_FORK) {
		/* Remove all pending CoW reservations. */
		error = xfs_reflink_cancel_cow_blocks(ip, &tp,
				first_unmap_block, XFS_MAX_FILEOFF, true);
		if (error)
			goto out;

		xfs_itruncate_clear_reflink_flags(ip);
	}

	/*
	 * Always re-log the inode so that our permanent transaction can keep
	 * on rolling it forward in the log.
	 */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	trace_xfs_itruncate_extents_end(ip, new_size);

out:
	*tpp = tp;
	return error;
}

/*
 * Mark all the buffers attached to this directory stale.  In theory we should
 * never be freeing a directory with any blocks at all, but this covers the
 * case where we've recovered a directory swap with a "temporary" directory
 * created by online repair and now need to dump it.
 */
STATIC void
xfs_inactive_dir(
	struct xfs_inode	*dp)
{
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	got;
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_da_geometry	*geo = mp->m_dir_geo;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(dp, XFS_DATA_FORK);
	xfs_fileoff_t		off;

	/*
	 * Invalidate each directory block.  All directory blocks are of
	 * fsbcount length and alignment, so we only need to walk those same
	 * offsets.  We hold the only reference to this inode, so we must wait
	 * for the buffer locks.
	 */
	for_each_xfs_iext(ifp, &icur, &got) {
		for (off = round_up(got.br_startoff, geo->fsbcount);
		     off < got.br_startoff + got.br_blockcount;
		     off += geo->fsbcount) {
			struct xfs_buf	*bp = NULL;
			xfs_fsblock_t	fsbno;
			int		error;

			fsbno = (off - got.br_startoff) + got.br_startblock;
			error = xfs_buf_incore(mp->m_ddev_targp,
					XFS_FSB_TO_DADDR(mp, fsbno),
					XFS_FSB_TO_BB(mp, geo->fsbcount),
					XBF_LIVESCAN, &bp);
			if (error)
				continue;

			xfs_buf_stale(bp);
			xfs_buf_relse(bp);
		}
	}
}

/*
 * xfs_inactive_truncate
 *
 * Called to perform a truncate when an inode becomes unlinked.
 */
STATIC int
xfs_inactive_truncate(
	struct xfs_inode *ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
	if (error) {
		ASSERT(xfs_is_shutdown(mp));
		return error;
	}
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * Log the inode size first to prevent stale data exposure in the event
	 * of a system crash before the truncate completes. See the related
	 * comment in xfs_vn_setattr_size() for details.
	 */
	ip->i_disk_size = 0;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
	if (error)
		goto error_trans_cancel;

	ASSERT(ip->i_df.if_nextents == 0);

	error = xfs_trans_commit(tp);
	if (error)
		goto error_unlock;

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;

error_trans_cancel:
	xfs_trans_cancel(tp);
error_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * xfs_inactive_ifree()
 *
 * Perform the inode free when an inode is unlinked.
 */
STATIC int
xfs_inactive_ifree(
	struct xfs_inode *ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	/*
	 * We try to use a per-AG reservation for any block needed by the finobt
	 * tree, but as the finobt feature predates the per-AG reservation
	 * support a degraded file system might not have enough space for the
	 * reservation at mount time.  In that case try to dip into the reserved
	 * pool and pray.
	 *
	 * Send a warning if the reservation does happen to fail, as the inode
	 * now remains allocated and sits on the unlinked list until the fs is
	 * repaired.
	 */
	if (unlikely(mp->m_finobt_nores)) {
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
				XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
				&tp);
	} else {
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
	}
	if (error) {
		if (error == -ENOSPC) {
			xfs_warn_ratelimited(mp,
			"Failed to remove inode(s) from unlinked list. "
			"Please free space, unmount and run xfs_repair.");
		} else {
			ASSERT(xfs_is_shutdown(mp));
		}
		return error;
	}

	/*
	 * We do not hold the inode locked across the entire rolling transaction
	 * here. We only need to hold it for the first transaction that
	 * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the
	 * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode
	 * here breaks the relationship between cluster buffer invalidation and
	 * stale inode invalidation on cluster buffer item journal commit
	 * completion, and can result in leaving dirty stale inodes hanging
	 * around in memory.
	 *
	 * We have no need for serialising this inode operation against other
	 * operations - we freed the inode and hence reallocation is required
	 * and that will serialise on reallocating the space the deferops need
	 * to free. Hence we can unlock the inode on the first commit of
	 * the transaction rather than roll it right through the deferops. This
	 * avoids relogging the XFS_ISTALE inode.
	 *
	 * We check that xfs_ifree() hasn't grown an internal transaction roll
	 * by asserting that the inode is still locked when it returns.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	error = xfs_ifree(tp, ip);
	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
	if (error) {
		/*
		 * If we fail to free the inode, shut down.  The cancel
		 * might do that, we need to make sure.  Otherwise the
		 * inode might be lost for a long time or forever.
		 */
		if (!xfs_is_shutdown(mp)) {
			xfs_notice(mp, "%s: xfs_ifree returned error %d",
				__func__, error);
			xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
		}
		xfs_trans_cancel(tp);
		return error;
	}

	/*
	 * Credit the quota account(s). The inode is gone.
	 */
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);

	return xfs_trans_commit(tp);
}

/*
 * Returns true if we need to update the on-disk metadata before we can free
 * the memory used by this inode.  Updates include freeing post-eof
 * preallocations; freeing COW staging extents; and marking the inode free in
 * the inobt if it is on the unlinked list.
 */
bool
xfs_inode_needs_inactive(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*cow_ifp = xfs_ifork_ptr(ip, XFS_COW_FORK);

	/*
	 * If the inode is already free, then there can be nothing
	 * to clean up here.
	 */
	if (VFS_I(ip)->i_mode == 0)
		return false;

	/*
	 * If this is a read-only mount, don't do this (would generate I/O)
	 * unless we're in log recovery and cleaning the iunlinked list.
	 */
	if (xfs_is_readonly(mp) && !xlog_recovery_needed(mp->m_log))
		return false;

	/* If the log isn't running, push inodes straight to reclaim. */
	if (xfs_is_shutdown(mp) || xfs_has_norecovery(mp))
		return false;

	/* Metadata inodes require explicit resource cleanup. */
	if (xfs_is_internal_inode(ip))
		return false;

	/* Want to clean out the cow blocks if there are any. */
	if (cow_ifp && cow_ifp->if_bytes > 0)
		return true;

	/* Unlinked files must be freed. */
	if (VFS_I(ip)->i_nlink == 0)
		return true;

	/*
	 * This file isn't being freed, so check if there are post-eof blocks
	 * to free.
	 *
	 * Note: don't bother with iolock here since lockdep complains about
	 * acquiring it in reclaim context. We have the only reference to the
	 * inode at this point anyways.
	 */
	return xfs_can_free_eofblocks(ip);
}

/*
 * Save health status somewhere, if we're dumping an inode with uncorrected
 * errors and online repair isn't running.
 */
static inline void
xfs_inactive_health(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;
	unsigned int		sick;
	unsigned int		checked;

	xfs_inode_measure_sickness(ip, &sick, &checked);
	if (!sick)
		return;

	trace_xfs_inode_unfixed_corruption(ip, sick);

	if (sick & XFS_SICK_INO_FORGET)
		return;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	if (!pag) {
		/* There had better still be a perag structure! */
		ASSERT(0);
		return;
	}

	xfs_ag_mark_sick(pag, XFS_SICK_AG_INODES);
	xfs_perag_put(pag);
}

/*
 * xfs_inactive
 *
 * This is called when the reference count for the vnode
 * goes to zero.  If the file has been unlinked, then it must
 * now be truncated.  Also, we clear all of the read-ahead state
 * kept for the inode here since the file is now closed.
 */
int
xfs_inactive(
	xfs_inode_t	*ip)
{
	struct xfs_mount	*mp;
	int			error = 0;
	int			truncate = 0;

	/*
	 * If the inode is already free, then there can be nothing
	 * to clean up here.
	 */
	if (VFS_I(ip)->i_mode == 0) {
		ASSERT(ip->i_df.if_broot_bytes == 0);
		goto out;
	}

	mp = ip->i_mount;
	ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));

	xfs_inactive_health(ip);

	/*
	 * If this is a read-only mount, don't do this (would generate I/O)
	 * unless we're in log recovery and cleaning the iunlinked list.
	 */
	if (xfs_is_readonly(mp) && !xlog_recovery_needed(mp->m_log))
		goto out;

	/* Metadata inodes require explicit resource cleanup. */
	if (xfs_is_internal_inode(ip))
		goto out;

	/* Try to clean out the cow blocks if there are any. */
	if (xfs_inode_has_cow_data(ip)) {
		error = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
		if (error)
			goto out;
	}

	if (VFS_I(ip)->i_nlink != 0) {
		/*
		 * Note: don't bother with iolock here since lockdep complains
		 * about acquiring it in reclaim context. We have the only
		 * reference to the inode at this point anyways.
		 */
		if (xfs_can_free_eofblocks(ip))
			error = xfs_free_eofblocks(ip);

		goto out;
	}

	if (S_ISREG(VFS_I(ip)->i_mode) &&
	    (ip->i_disk_size != 0 || XFS_ISIZE(ip) != 0 ||
	     xfs_inode_has_filedata(ip)))
		truncate = 1;

	if (xfs_iflags_test(ip, XFS_IQUOTAUNCHECKED)) {
		/*
		 * If this inode is being inactivated during a quotacheck and
		 * has not yet been scanned by quotacheck, we /must/ remove
		 * the dquots from the inode before inactivation changes the
		 * block and inode counts.  Most probably this is a result of
		 * reloading the incore iunlinked list to purge unrecovered
		 * unlinked inodes.
		 */
		xfs_qm_dqdetach(ip);
	} else {
		error = xfs_qm_dqattach(ip);
		if (error)
			goto out;
	}

	if (S_ISDIR(VFS_I(ip)->i_mode) && ip->i_df.if_nextents > 0) {
		xfs_inactive_dir(ip);
		truncate = 1;
	}

	if (S_ISLNK(VFS_I(ip)->i_mode))
		error = xfs_inactive_symlink(ip);
	else if (truncate)
		error = xfs_inactive_truncate(ip);
	if (error)
		goto out;

	/*
	 * If there are attributes associated with the file then blow them away
	 * now.  The code calls a routine that recursively deconstructs the
	 * attribute fork.  It also blows away the in-core attribute fork.
	 */
	if (xfs_inode_has_attr_fork(ip)) {
		error = xfs_attr_inactive(ip);
		if (error)
			goto out;
	}

	ASSERT(ip->i_forkoff == 0);

	/*
	 * Free the inode.
	 */
	error = xfs_inactive_ifree(ip);

out:
	/*
	 * We're done making metadata updates for this inode, so we can release
	 * the attached dquots.
	 */
	xfs_qm_dqdetach(ip);
	return error;
}

/*
 * Find an inode on the unlinked list. This does not take references to the
 * inode as we have existence guarantees by holding the AGI buffer lock, and
 * because only unlinked, referenced inodes can be on the unlinked inode list.
 * If we don't find the inode in cache, then let the caller handle the
 * situation.
 */
struct xfs_inode *
xfs_iunlink_lookup(
	struct xfs_perag	*pag,
	xfs_agino_t		agino)
{
	struct xfs_inode	*ip;

	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
	if (!ip) {
		/* Caller can handle inode not being in memory. */
		rcu_read_unlock();
		return NULL;
	}

	/*
	 * Inode in RCU freeing limbo should not happen.  Warn about this and
	 * let the caller handle the failure.
	 */
	if (WARN_ON_ONCE(!ip->i_ino)) {
		rcu_read_unlock();
		return NULL;
	}
	ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM));
	rcu_read_unlock();
	return ip;
}

/*
 * Load the inode @next_agino into the cache and set its prev_unlinked pointer
 * to @prev_agino.  Caller must hold the AGI to synchronize with other changes
 * to the unlinked list.
 */
int
xfs_iunlink_reload_next(
	struct xfs_trans	*tp,
	struct xfs_buf		*agibp,
	xfs_agino_t		prev_agino,
	xfs_agino_t		next_agino)
{
	struct xfs_perag	*pag = agibp->b_pag;
	struct xfs_mount	*mp = pag_mount(pag);
	struct xfs_inode	*next_ip = NULL;
	int			error;

	ASSERT(next_agino != NULLAGINO);

#ifdef DEBUG
	rcu_read_lock();
	next_ip = radix_tree_lookup(&pag->pag_ici_root, next_agino);
	ASSERT(next_ip == NULL);
	rcu_read_unlock();
#endif

	xfs_info_ratelimited(mp,
 "Found unrecovered unlinked inode 0x%x in AG 0x%x.  Initiating recovery.",
			next_agino, pag_agno(pag));

	/*
	 * Use an untrusted lookup just to be cautious in case the AGI has been
	 * corrupted and now points at a free inode.  That shouldn't happen,
	 * but we'd rather shut down now since we're already running in a weird
	 * situation.
	 */
	error = xfs_iget(mp, tp, xfs_agino_to_ino(pag, next_agino),
			XFS_IGET_UNTRUSTED, 0, &next_ip);
	if (error) {
		xfs_ag_mark_sick(pag, XFS_SICK_AG_AGI);
		return error;
	}

	/* If this is not an unlinked inode, something is very wrong. */
	if (VFS_I(next_ip)->i_nlink != 0) {
		xfs_ag_mark_sick(pag, XFS_SICK_AG_AGI);
		error = -EFSCORRUPTED;
		goto rele;
	}

	next_ip->i_prev_unlinked = prev_agino;
	trace_xfs_iunlink_reload_next(next_ip);
rele:
	ASSERT(!(inode_state_read_once(VFS_I(next_ip)) & I_DONTCACHE));
	if (xfs_is_quotacheck_running(mp) && next_ip)
		xfs_iflags_set(next_ip, XFS_IQUOTAUNCHECKED);
	xfs_irele(next_ip);
	return error;
}

/*
 * Look up the inode number specified and if it is not already marked XFS_ISTALE
 * mark it stale. We should only find clean inodes in this lookup that aren't
 * already stale.
 */
static void
xfs_ifree_mark_inode_stale(
	struct xfs_perag	*pag,
	struct xfs_inode	*free_ip,
	xfs_ino_t		inum)
{
	struct xfs_mount	*mp = pag_mount(pag);
	struct xfs_inode_log_item *iip;
	struct xfs_inode	*ip;

retry:
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, inum));

	/* Inode not in memory, nothing to do */
	if (!ip) {
		rcu_read_unlock();
		return;
	}

	/*
	 * Because this is an RCU protected lookup, we could find a recently
	 * freed or even reallocated inode during the lookup. We need to check
	 * under the i_flags_lock for a valid inode here. Skip it if it is not
	 * valid, the wrong inode or stale.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != inum || __xfs_iflags_test(ip, XFS_ISTALE))
		goto out_iflags_unlock;

	/*
	 * Don't try to lock/unlock the current inode, but we _cannot_ skip the
	 * other inodes that we did not find in the list attached to the buffer
	 * and are not already marked stale. If we can't lock it, back off and
	 * retry.
	 */
	if (ip != free_ip) {
		if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
			spin_unlock(&ip->i_flags_lock);
			rcu_read_unlock();
			delay(1);
			goto retry;
		}
	}
	ip->i_flags |= XFS_ISTALE;

	/*
	 * If the inode is flushing, it is already attached to the buffer.  All
	 * we needed to do here is mark the inode stale so buffer IO completion
	 * will remove it from the AIL.
	 */
	iip = ip->i_itemp;
	if (__xfs_iflags_test(ip, XFS_IFLUSHING)) {
		ASSERT(!list_empty(&iip->ili_item.li_bio_list));
		ASSERT(iip->ili_last_fields || xlog_is_shutdown(mp->m_log));
		goto out_iunlock;
	}

	/*
	 * Inodes not attached to the buffer can be released immediately.
	 * Everything else has to go through xfs_iflush_abort() on journal
	 * commit as the flock synchronises removal of the inode from the
	 * cluster buffer against inode reclaim.
	 */
	if (!iip || list_empty(&iip->ili_item.li_bio_list))
		goto out_iunlock;

	__xfs_iflags_set(ip, XFS_IFLUSHING);
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();

	/* We have a dirty inode in memory that has not yet been flushed. */
	spin_lock(&iip->ili_lock);
	iip->ili_last_fields = iip->ili_fields;
	iip->ili_fields = 0;
	spin_unlock(&iip->ili_lock);
	ASSERT(iip->ili_last_fields);

	if (ip != free_ip)
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return;

out_iunlock:
	if (ip != free_ip)
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_iflags_unlock:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
}
1679 
1680 /*
1681  * A big issue when freeing the inode cluster is that we _cannot_ skip any
1682  * inodes that are in memory - they all must be marked stale and attached to
1683  * the cluster buffer.
1684  */
1685 static int
xfs_ifree_cluster(struct xfs_trans * tp,struct xfs_perag * pag,struct xfs_inode * free_ip,struct xfs_icluster * xic)1686 xfs_ifree_cluster(
1687 	struct xfs_trans	*tp,
1688 	struct xfs_perag	*pag,
1689 	struct xfs_inode	*free_ip,
1690 	struct xfs_icluster	*xic)
1691 {
1692 	struct xfs_mount	*mp = free_ip->i_mount;
1693 	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
1694 	struct xfs_buf		*bp;
1695 	xfs_daddr_t		blkno;
1696 	xfs_ino_t		inum = xic->first_ino;
1697 	int			nbufs;
1698 	int			i, j;
1699 	int			ioffset;
1700 	int			error;
1701 
1702 	nbufs = igeo->ialloc_blks / igeo->blocks_per_cluster;
1703 
1704 	for (j = 0; j < nbufs; j++, inum += igeo->inodes_per_cluster) {
1705 		/*
1706 		 * The allocation bitmap tells us which inodes of the chunk were
1707 		 * physically allocated. Skip the cluster if an inode falls into
1708 		 * a sparse region.
1709 		 */
1710 		ioffset = inum - xic->first_ino;
1711 		if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
1712 			ASSERT(ioffset % igeo->inodes_per_cluster == 0);
1713 			continue;
1714 		}
1715 
1716 		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
1717 					 XFS_INO_TO_AGBNO(mp, inum));
1718 
1719 		/*
1720 		 * We obtain and lock the backing buffer first in the process
1721 		 * here to ensure dirty inodes attached to the buffer remain in
1722 		 * the flushing state while we mark them stale.
1723 		 *
1724 		 * If we scan the in-memory inodes first, then buffer IO can
1725 		 * complete before we get a lock on it, and hence we may fail
1726 		 * to mark all the active inodes on the buffer stale.
1727 		 */
1728 		error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
1729 				mp->m_bsize * igeo->blocks_per_cluster, 0, &bp);
1730 		if (error)
1731 			return error;
1732 
1733 		/*
1734 		 * This buffer may not have been correctly initialised as we
1735 		 * didn't read it from disk. That's not important because we are
1736 		 * only using it to mark the buffer as stale in the log, and to
1737 		 * attach stale cached inodes on it.
1738 		 *
1739 		 * For the inode that triggered the cluster freeing, this
1740 		 * attachment may occur in xfs_inode_item_precommit() after we
1741 		 * have marked this buffer stale.  If this buffer was not in
1742 		 * memory before xfs_ifree_cluster() started, it will not be
1743 		 * marked XBF_DONE and this will cause problems later in
1744 		 * xfs_inode_item_precommit() when we trip over a (stale, !done)
1745 		 * buffer attached to the transaction.
1746 		 *
1747 		 * Hence we have to mark the buffer as XBF_DONE here. This is
1748 		 * safe because we are also marking the buffer as XBF_STALE and
1749 		 * XFS_BLI_STALE. That means it will never be dispatched for
1750 		 * IO and it won't be unlocked until the cluster freeing has
1751 		 * been committed to the journal and the buffer unpinned. If it
1752 		 * is written, we want to know about it, and we want it to
1753 		 * fail. We can achieve this by adding a write verifier to the
1754 		 * buffer.
1755 		 */
1756 		bp->b_flags |= XBF_DONE;
1757 		bp->b_ops = &xfs_inode_buf_ops;
1758 
1759 		/*
1760 		 * Now we need to set all the cached clean inodes as XFS_ISTALE,
1761 		 * too. This requires lookups, and will skip inodes that we've
1762 		 * already marked XFS_ISTALE.
1763 		 */
1764 		for (i = 0; i < igeo->inodes_per_cluster; i++)
1765 			xfs_ifree_mark_inode_stale(pag, free_ip, inum + i);
1766 
1767 		xfs_trans_stale_inode_buf(tp, bp);
1768 		xfs_trans_binval(tp, bp);
1769 	}
1770 	return 0;
1771 }
1772 
1773 /*
1774  * This is called to return an inode to the inode free list.  The inode should
1775  * already be truncated to 0 length and have no pages associated with it.  This
1776  * routine also assumes that the inode is already a part of the transaction.
1777  *
1778  * The on-disk copy of the inode will have been added to the list of unlinked
1779  * inodes in the AGI. We need to remove the inode from that list atomically with
1780  * respect to freeing it here.
1781  */
1782 int
1783 xfs_ifree(
1784 	struct xfs_trans	*tp,
1785 	struct xfs_inode	*ip)
1786 {
1787 	struct xfs_mount	*mp = ip->i_mount;
1788 	struct xfs_perag	*pag;
1789 	struct xfs_icluster	xic = { 0 };
1790 	struct xfs_inode_log_item *iip = ip->i_itemp;
1791 	int			error;
1792 
1793 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
1794 	ASSERT(VFS_I(ip)->i_nlink == 0);
1795 	ASSERT(ip->i_df.if_nextents == 0);
1796 	ASSERT(ip->i_disk_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
1797 	ASSERT(ip->i_nblocks == 0);
1798 
1799 	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1800 
1801 	error = xfs_inode_uninit(tp, pag, ip, &xic);
1802 	if (error)
1803 		goto out;
1804 
1805 	if (xfs_iflags_test(ip, XFS_IPRESERVE_DM_FIELDS))
1806 		xfs_iflags_clear(ip, XFS_IPRESERVE_DM_FIELDS);
1807 
1808 	/* Don't attempt to replay owner changes for a deleted inode */
1809 	spin_lock(&iip->ili_lock);
1810 	iip->ili_fields &= ~(XFS_ILOG_AOWNER | XFS_ILOG_DOWNER);
1811 	spin_unlock(&iip->ili_lock);
1812 
1813 	if (xic.deleted)
1814 		error = xfs_ifree_cluster(tp, pag, ip, &xic);
1815 out:
1816 	xfs_perag_put(pag);
1817 	return error;
1818 }
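
/*
 * Illustrative sketch (not part of the original file): a minimal caller
 * pattern for xfs_ifree(), loosely modelled on the inactivation path.
 * The reservation details are simplified and the helper name is
 * hypothetical; the real caller is xfs_inactive_ifree().
 */
static inline int
xfs_ifree_example(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	/* Reserve log and block space for the free before locking. */
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
			XFS_IFREE_SPACE_RES(mp), 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_ifree(tp, ip);
	if (error) {
		xfs_trans_cancel(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		return error;
	}

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}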
1819 
1820 /*
1821  * This is called to unpin an inode.  The caller must have the inode locked
1822  * in at least shared mode so that the buffer cannot be subsequently pinned
1823  * once someone is waiting for it to be unpinned.
1824  */
1825 static void
1826 xfs_iunpin(
1827 	struct xfs_inode	*ip)
1828 {
1829 	struct xfs_inode_log_item *iip = ip->i_itemp;
1830 	xfs_csn_t		seq = 0;
1831 
1832 	trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
1833 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED);
1834 
1835 	spin_lock(&iip->ili_lock);
1836 	seq = iip->ili_commit_seq;
1837 	spin_unlock(&iip->ili_lock);
1838 	if (!seq)
1839 		return;
1840 
1841 	/* Give the log a push to start the unpinning I/O */
1842 	xfs_log_force_seq(ip->i_mount, seq, 0, NULL);
1843 
1844 }
1845 
1846 static void
1847 __xfs_iunpin_wait(
1848 	struct xfs_inode	*ip)
1849 {
1850 	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
1851 	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
1852 
1853 	xfs_iunpin(ip);
1854 
1855 	do {
1856 		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
1857 		if (xfs_ipincount(ip))
1858 			io_schedule();
1859 	} while (xfs_ipincount(ip));
1860 	finish_wait(wq, &wait.wq_entry);
1861 }
1862 
1863 void
1864 xfs_iunpin_wait(
1865 	struct xfs_inode	*ip)
1866 {
1867 	if (xfs_ipincount(ip))
1868 		__xfs_iunpin_wait(ip);
1869 }
1870 
1871 /*
1872  * Removing an inode from the namespace involves removing the directory entry
1873  * and dropping the link count on the inode. Removing the directory entry can
1874  * result in locking an AGF (directory blocks were freed) and removing a link
1875  * count can result in placing the inode on an unlinked list which results in
1876  * locking an AGI.
1877  *
1878  * The big problem here is that we have an ordering constraint on AGF and AGI
1879  * locking - inode allocation locks the AGI, then can allocate a new extent for
1880  * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
1881  * removes the inode from the unlinked list, requiring that we lock the AGI
1882  * first, and then freeing the inode can result in an inode chunk being freed
1883  * and hence freeing disk space requiring that we lock an AGF.
1884  *
1885  * Hence the ordering that is imposed by other parts of the code is AGI before
1886  * AGF. This means we cannot remove the directory entry before we drop the inode
1887  * reference count and put it on the unlinked list as this results in a lock
1888  * order of AGF then AGI, and this can deadlock against inode allocation and
1889  * freeing. Therefore we must drop the link counts before we remove the
1890  * directory entry.
1891  *
1892  * This is still safe from a transactional point of view - it is not until we
1893  * get to xfs_defer_finish() that we have the possibility of multiple
1894  * transactions in this operation. Hence as long as we remove the directory
1895  * entry and drop the link count in the first transaction of the remove
1896  * operation, there are no transactional constraints on the ordering here.
1897  */
1898 int
1899 xfs_remove(
1900 	struct xfs_inode	*dp,
1901 	struct xfs_name		*name,
1902 	struct xfs_inode	*ip)
1903 {
1904 	struct xfs_dir_update	du = {
1905 		.dp		= dp,
1906 		.name		= name,
1907 		.ip		= ip,
1908 	};
1909 	struct xfs_mount	*mp = dp->i_mount;
1910 	struct xfs_trans	*tp = NULL;
1911 	int			is_dir = S_ISDIR(VFS_I(ip)->i_mode);
1912 	int			dontcare;
1913 	int			error = 0;
1914 	uint			resblks;
1915 
1916 	trace_xfs_remove(dp, name);
1917 
1918 	if (xfs_is_shutdown(mp))
1919 		return -EIO;
1920 	if (xfs_ifork_zapped(dp, XFS_DATA_FORK))
1921 		return -EIO;
1922 
1923 	error = xfs_qm_dqattach(dp);
1924 	if (error)
1925 		goto std_return;
1926 
1927 	error = xfs_qm_dqattach(ip);
1928 	if (error)
1929 		goto std_return;
1930 
1931 	error = xfs_parent_start(mp, &du.ppargs);
1932 	if (error)
1933 		goto std_return;
1934 
1935 	/*
1936 	 * We try to get the real space reservation first, allowing for
1937 	 * directory btree deletion(s) implying possible bmap insert(s).  If we
1938 	 * can't get the space reservation then we use 0 instead, and avoid the
1939 	 * bmap btree insert(s) in the directory code: if one would be needed,
1940 	 * the directory code trims the LAST block from the directory instead.
1941 	 *
1942 	 * Ignore EDQUOT and ENOSPC being returned via nospace_error because
1943 	 * the directory code can handle a reservationless update and we don't
1944 	 * want to prevent a user from trying to free space by deleting things.
1945 	 */
1946 	resblks = xfs_remove_space_res(mp, name->len);
1947 	error = xfs_trans_alloc_dir(dp, &M_RES(mp)->tr_remove, ip, &resblks,
1948 			&tp, &dontcare);
1949 	if (error) {
1950 		ASSERT(error != -ENOSPC);
1951 		goto out_parent;
1952 	}
1953 
1954 	error = xfs_dir_remove_child(tp, resblks, &du);
1955 	if (error)
1956 		goto out_trans_cancel;
1957 
1958 	/*
1959 	 * If this is a synchronous mount, make sure that the
1960 	 * remove transaction goes to disk before returning to
1961 	 * the user.
1962 	 */
1963 	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
1964 		xfs_trans_set_sync(tp);
1965 
1966 	error = xfs_trans_commit(tp);
1967 	if (error)
1968 		goto out_unlock;
1969 
1970 	if (is_dir && xfs_inode_is_filestream(ip))
1971 		xfs_filestream_deassociate(ip);
1972 
1973 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1974 	xfs_iunlock(dp, XFS_ILOCK_EXCL);
1975 	xfs_parent_finish(mp, du.ppargs);
1976 	return 0;
1977 
1978  out_trans_cancel:
1979 	xfs_trans_cancel(tp);
1980  out_unlock:
1981 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1982 	xfs_iunlock(dp, XFS_ILOCK_EXCL);
1983  out_parent:
1984 	xfs_parent_finish(mp, du.ppargs);
1985  std_return:
1986 	return error;
1987 }
1988 
1989 static inline void
1990 xfs_iunlock_rename(
1991 	struct xfs_inode	**i_tab,
1992 	int			num_inodes)
1993 {
1994 	int			i;
1995 
1996 	for (i = num_inodes - 1; i >= 0; i--) {
1997 		/* Skip duplicate inodes if src and target dps are the same */
1998 		if (!i_tab[i] || (i > 0 && i_tab[i] == i_tab[i - 1]))
1999 			continue;
2000 		xfs_iunlock(i_tab[i], XFS_ILOCK_EXCL);
2001 	}
2002 }
2003 
2004 /*
2005  * Enter all inodes for a rename transaction into a sorted array.
2006  */
2007 #define __XFS_SORT_INODES	5
2008 STATIC void
2009 xfs_sort_for_rename(
2010 	struct xfs_inode	*dp1,	/* in: old (source) directory inode */
2011 	struct xfs_inode	*dp2,	/* in: new (target) directory inode */
2012 	struct xfs_inode	*ip1,	/* in: inode of old entry */
2013 	struct xfs_inode	*ip2,	/* in: inode of new entry */
2014 	struct xfs_inode	*wip,	/* in: whiteout inode */
2015 	struct xfs_inode	**i_tab,/* out: sorted array of inodes */
2016 	int			*num_inodes)  /* in/out: inodes in array */
2017 {
2018 	int			i;
2019 
2020 	ASSERT(*num_inodes == __XFS_SORT_INODES);
2021 	memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));
2022 
2023 	/*
2024 	 * i_tab contains a list of pointers to inodes.  We initialize
2025 	 * the table here & we'll sort it.  We will then use it to
2026 	 * order the acquisition of the inode locks.
2027 	 *
2028 	 * Note that the table may contain duplicates.  e.g., dp1 == dp2.
2029 	 */
2030 	i = 0;
2031 	i_tab[i++] = dp1;
2032 	i_tab[i++] = dp2;
2033 	i_tab[i++] = ip1;
2034 	if (ip2)
2035 		i_tab[i++] = ip2;
2036 	if (wip)
2037 		i_tab[i++] = wip;
2038 	*num_inodes = i;
2039 
2040 	xfs_sort_inodes(i_tab, *num_inodes);
2041 }
2042 
2043 void
2044 xfs_sort_inodes(
2045 	struct xfs_inode	**i_tab,
2046 	unsigned int		num_inodes)
2047 {
2048 	int			i, j;
2049 
2050 	ASSERT(num_inodes <= __XFS_SORT_INODES);
2051 
2052 	/*
2053 	 * Sort the elements via bubble sort.  (Remember, there are at
2054 	 * most 5 elements to sort, so this is adequate.)
2055 	 */
2056 	for (i = 0; i < num_inodes; i++) {
2057 		for (j = 1; j < num_inodes; j++) {
2058 			if (i_tab[j]->i_ino < i_tab[j-1]->i_ino)
2059 				swap(i_tab[j], i_tab[j - 1]);
2060 		}
2061 	}
2062 }
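
/*
 * Illustrative sketch (not part of the original file): the same
 * sort-then-lock idea xfs_sort_for_rename() relies on, applied to two
 * distinct inodes.  The helper name is hypothetical; xfs_lock_inodes()
 * is defined earlier in this file.
 */
static inline void
xfs_sort_and_lock_two_example(
	struct xfs_inode	*a,
	struct xfs_inode	*b)
{
	struct xfs_inode	*tab[2] = { a, b };

	/* Sort by inode number, then lock in ascending order. */
	xfs_sort_inodes(tab, 2);
	xfs_lock_inodes(tab, 2, XFS_ILOCK_EXCL);
}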
2063 
2064 /*
2065  * xfs_rename_alloc_whiteout()
2066  *
2067  * Return a referenced, unlinked, unlocked inode that can be used as a
2068  * whiteout in a rename transaction. We use a tmpfile inode here so that if we
2069  * crash between allocating the inode and linking it into the rename transaction
2070  * recovery will free the inode and we won't leak it.
2071  */
2072 static int
2073 xfs_rename_alloc_whiteout(
2074 	struct mnt_idmap	*idmap,
2075 	struct xfs_name		*src_name,
2076 	struct xfs_inode	*dp,
2077 	struct xfs_inode	**wip)
2078 {
2079 	struct xfs_icreate_args	args = {
2080 		.idmap		= idmap,
2081 		.pip		= dp,
2082 		.mode		= S_IFCHR | WHITEOUT_MODE,
2083 		.flags		= XFS_ICREATE_TMPFILE,
2084 	};
2085 	struct xfs_inode	*tmpfile;
2086 	struct qstr		name;
2087 	int			error;
2088 
2089 	error = xfs_create_tmpfile(&args, &tmpfile);
2090 	if (error)
2091 		return error;
2092 
2093 	name.name = src_name->name;
2094 	name.len = src_name->len;
2095 	error = xfs_inode_init_security(VFS_I(tmpfile), VFS_I(dp), &name);
2096 	if (error) {
2097 		xfs_finish_inode_setup(tmpfile);
2098 		xfs_irele(tmpfile);
2099 		return error;
2100 	}
2101 
2102 	/*
2103 	 * Prepare the tmpfile inode as if it were created through the VFS.
2104 	 * Complete the inode setup and flag it as linkable.  nlink is already
2105 	 * zero, so we can skip the drop_nlink.
2106 	 */
2107 	xfs_setup_iops(tmpfile);
2108 	xfs_finish_inode_setup(tmpfile);
2109 	inode_state_set_raw(VFS_I(tmpfile), I_LINKABLE);
2110 
2111 	*wip = tmpfile;
2112 	return 0;
2113 }
2114 
2115 /*
2116  * xfs_rename
2117  */
2118 int
2119 xfs_rename(
2120 	struct mnt_idmap	*idmap,
2121 	struct xfs_inode	*src_dp,
2122 	struct xfs_name		*src_name,
2123 	struct xfs_inode	*src_ip,
2124 	struct xfs_inode	*target_dp,
2125 	struct xfs_name		*target_name,
2126 	struct xfs_inode	*target_ip,
2127 	unsigned int		flags)
2128 {
2129 	struct xfs_dir_update	du_src = {
2130 		.dp		= src_dp,
2131 		.name		= src_name,
2132 		.ip		= src_ip,
2133 	};
2134 	struct xfs_dir_update	du_tgt = {
2135 		.dp		= target_dp,
2136 		.name		= target_name,
2137 		.ip		= target_ip,
2138 	};
2139 	struct xfs_dir_update	du_wip = { };
2140 	struct xfs_mount	*mp = src_dp->i_mount;
2141 	struct xfs_trans	*tp;
2142 	struct xfs_inode	*inodes[__XFS_SORT_INODES];
2143 	int			i;
2144 	int			num_inodes = __XFS_SORT_INODES;
2145 	bool			new_parent = (src_dp != target_dp);
2146 	bool			src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
2147 	int			spaceres;
2148 	bool			retried = false;
2149 	int			error, nospace_error = 0;
2150 
2151 	trace_xfs_rename(src_dp, target_dp, src_name, target_name);
2152 
2153 	if ((flags & RENAME_EXCHANGE) && !target_ip)
2154 		return -EINVAL;
2155 
2156 	/*
2157 	 * If we are doing a whiteout operation, allocate the whiteout inode
2158 	 * we will be placing at the target and ensure the type is set
2159 	 * appropriately.
2160 	 */
2161 	if (flags & RENAME_WHITEOUT) {
2162 		error = xfs_rename_alloc_whiteout(idmap, src_name, target_dp,
2163 				&du_wip.ip);
2164 		if (error)
2165 			return error;
2166 
2167 		/* setup target dirent info as whiteout */
2168 		src_name->type = XFS_DIR3_FT_CHRDEV;
2169 	}
2170 
2171 	xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, du_wip.ip,
2172 			inodes, &num_inodes);
2173 
2174 	error = xfs_parent_start(mp, &du_src.ppargs);
2175 	if (error)
2176 		goto out_release_wip;
2177 
2178 	if (du_wip.ip) {
2179 		error = xfs_parent_start(mp, &du_wip.ppargs);
2180 		if (error)
2181 			goto out_src_ppargs;
2182 	}
2183 
2184 	if (target_ip) {
2185 		error = xfs_parent_start(mp, &du_tgt.ppargs);
2186 		if (error)
2187 			goto out_wip_ppargs;
2188 	}
2189 
2190 retry:
2191 	nospace_error = 0;
2192 	spaceres = xfs_rename_space_res(mp, src_name->len, target_ip != NULL,
2193 			target_name->len, du_wip.ip != NULL);
2194 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
2195 	if (error == -ENOSPC) {
2196 		nospace_error = error;
2197 		spaceres = 0;
2198 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
2199 				&tp);
2200 	}
2201 	if (error)
2202 		goto out_tgt_ppargs;
2203 
2204 	/*
2205 	 * We don't allow reservationless renaming when parent pointers are
2206 	 * enabled because we can't back out if the xattrs must grow.
2207 	 */
2208 	if (du_src.ppargs && nospace_error) {
2209 		error = nospace_error;
2210 		xfs_trans_cancel(tp);
2211 		goto out_tgt_ppargs;
2212 	}
2213 
2214 	/*
2215 	 * Attach the dquots to the inodes
2216 	 */
2217 	error = xfs_qm_vop_rename_dqattach(inodes);
2218 	if (error) {
2219 		xfs_trans_cancel(tp);
2220 		goto out_tgt_ppargs;
2221 	}
2222 
2223 	/*
2224 	 * Lock all the participating inodes. Depending upon whether
2225 	 * the target_name exists in the target directory, and
2226 	 * whether the target directory is the same as the source
2227 	 * directory, we can lock from 2 to 5 inodes.
2228 	 */
2229 	xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
2230 
2231 	/*
2232 	 * Join all the inodes to the transaction.
2233 	 */
2234 	xfs_trans_ijoin(tp, src_dp, 0);
2235 	if (new_parent)
2236 		xfs_trans_ijoin(tp, target_dp, 0);
2237 	xfs_trans_ijoin(tp, src_ip, 0);
2238 	if (target_ip)
2239 		xfs_trans_ijoin(tp, target_ip, 0);
2240 	if (du_wip.ip)
2241 		xfs_trans_ijoin(tp, du_wip.ip, 0);
2242 
2243 	error = xfs_projid_differ(target_dp, src_ip);
2244 	if (error)
2245 		goto out_trans_cancel;
2246 
2247 	/* RENAME_EXCHANGE is unique from here on. */
2248 	if (flags & RENAME_EXCHANGE) {
2249 		error = xfs_dir_exchange_children(tp, &du_src, &du_tgt,
2250 				spaceres);
2251 		if (error)
2252 			goto out_trans_cancel;
2253 		goto out_commit;
2254 	}
2255 
2256 	/*
2257 	 * Try to reserve quota to handle an expansion of the target directory.
2258 	 * We'll allow the rename to continue in reservationless mode if we hit
2259 	 * a space usage constraint.  If we trigger reservationless mode, save
2260 	 * the errno if there isn't any free space in the target directory.
2261 	 */
2262 	if (spaceres != 0) {
2263 		error = xfs_trans_reserve_quota_nblks(tp, target_dp, spaceres,
2264 				0, false);
2265 		if (error == -EDQUOT || error == -ENOSPC) {
2266 			if (!retried) {
2267 				xfs_trans_cancel(tp);
2268 				xfs_iunlock_rename(inodes, num_inodes);
2269 				xfs_blockgc_free_quota(target_dp, 0);
2270 				retried = true;
2271 				goto retry;
2272 			}
2273 
2274 			nospace_error = error;
2275 			spaceres = 0;
2276 			error = 0;
2277 		}
2278 		if (error)
2279 			goto out_trans_cancel;
2280 	}
2281 
2282 	/*
2283 	 * We don't allow quotaless renaming when parent pointers are enabled
2284 	 * because we can't back out if the xattrs must grow.
2285 	 */
2286 	if (du_src.ppargs && nospace_error) {
2287 		error = nospace_error;
2288 		goto out_trans_cancel;
2289 	}
2290 
2291 	/*
2292 	 * Lock the AGI buffers we need to handle bumping the nlink of the
2293 	 * whiteout inode off the unlinked list and to handle dropping the
2294 	 * nlink of the target inode.  Per locking order rules, do this in
2295 	 * increasing AG order and before directory block allocation tries to
2296 	 * grab AGFs because we grab AGIs before AGFs.
2297 	 *
2298 	 * The (vfs) caller must ensure that if src is a directory then
2299 	 * target_ip is either null or an empty directory.
2300 	 */
2301 	for (i = 0; i < num_inodes && inodes[i] != NULL; i++) {
2302 		if (inodes[i] == du_wip.ip ||
2303 		    (inodes[i] == target_ip &&
2304 		     (VFS_I(target_ip)->i_nlink == 1 || src_is_directory))) {
2305 			struct xfs_perag	*pag;
2306 			struct xfs_buf		*bp;
2307 
2308 			pag = xfs_perag_get(mp,
2309 					XFS_INO_TO_AGNO(mp, inodes[i]->i_ino));
2310 			error = xfs_read_agi(pag, tp, 0, &bp);
2311 			xfs_perag_put(pag);
2312 			if (error)
2313 				goto out_trans_cancel;
2314 		}
2315 	}
2316 
2317 	error = xfs_dir_rename_children(tp, &du_src, &du_tgt, spaceres,
2318 			&du_wip);
2319 	if (error)
2320 		goto out_trans_cancel;
2321 
2322 	if (du_wip.ip) {
2323 		/*
2324 		 * Now we have a real link, clear the "I'm a tmpfile" state
2325 		 * flag from the inode so it doesn't accidentally get misused in
2326 		 * future.
2327 		 */
2328 		inode_state_clear_raw(VFS_I(du_wip.ip), I_LINKABLE);
2329 	}
2330 
2331 out_commit:
2332 	/*
2333 	 * If this is a synchronous mount, make sure that the rename
2334 	 * transaction goes to disk before returning to the user.
2335 	 */
2336 	if (xfs_has_wsync(tp->t_mountp) || xfs_has_dirsync(tp->t_mountp))
2337 		xfs_trans_set_sync(tp);
2338 
2339 	error = xfs_trans_commit(tp);
2340 	nospace_error = 0;
2341 	goto out_unlock;
2342 
2343 out_trans_cancel:
2344 	xfs_trans_cancel(tp);
2345 out_unlock:
2346 	xfs_iunlock_rename(inodes, num_inodes);
2347 out_tgt_ppargs:
2348 	xfs_parent_finish(mp, du_tgt.ppargs);
2349 out_wip_ppargs:
2350 	xfs_parent_finish(mp, du_wip.ppargs);
2351 out_src_ppargs:
2352 	xfs_parent_finish(mp, du_src.ppargs);
2353 out_release_wip:
2354 	if (du_wip.ip)
2355 		xfs_irele(du_wip.ip);
2356 	if (error == -ENOSPC && nospace_error)
2357 		error = nospace_error;
2358 	return error;
2359 }
2360 
2361 static int
2362 xfs_iflush(
2363 	struct xfs_inode	*ip,
2364 	struct xfs_buf		*bp)
2365 {
2366 	struct xfs_inode_log_item *iip = ip->i_itemp;
2367 	struct xfs_dinode	*dip;
2368 	struct xfs_mount	*mp = ip->i_mount;
2369 	int			error;
2370 
2371 	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED);
2372 	ASSERT(xfs_iflags_test(ip, XFS_IFLUSHING));
2373 	ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
2374 	       ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
2375 	ASSERT(iip->ili_item.li_buf == bp);
2376 
2377 	dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
2378 
2379 	/*
2380 	 * We don't flush the inode if any of the following checks fail, but we
2381 	 * do still update the log item and attach to the backing buffer as if
2382 	 * the flush happened. This is a formality to facilitate predictable
2383 	 * error handling as the caller will shutdown and fail the buffer.
2384 	 */
2385 	error = -EFSCORRUPTED;
2386 	if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC) ||
2387 	    XFS_TEST_ERROR(mp, XFS_ERRTAG_IFLUSH_1)) {
2388 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2389 			"%s: Bad inode %llu magic number 0x%x, ptr "PTR_FMT,
2390 			__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
2391 		goto flush_out;
2392 	}
2393 	if (ip->i_df.if_format == XFS_DINODE_FMT_META_BTREE) {
2394 		if (!S_ISREG(VFS_I(ip)->i_mode) ||
2395 		    !(ip->i_diflags2 & XFS_DIFLAG2_METADATA)) {
2396 			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2397 				"%s: Bad %s meta btree inode %Lu, ptr "PTR_FMT,
2398 				__func__, xfs_metafile_type_str(ip->i_metatype),
2399 				ip->i_ino, ip);
2400 			goto flush_out;
2401 		}
2402 	} else if (S_ISREG(VFS_I(ip)->i_mode)) {
2403 		if ((ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
2404 		     ip->i_df.if_format != XFS_DINODE_FMT_BTREE) ||
2405 		    XFS_TEST_ERROR(mp, XFS_ERRTAG_IFLUSH_3)) {
2406 			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2407 				"%s: Bad regular inode %llu, ptr "PTR_FMT,
2408 				__func__, ip->i_ino, ip);
2409 			goto flush_out;
2410 		}
2411 	} else if (S_ISDIR(VFS_I(ip)->i_mode)) {
2412 		if ((ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
2413 		     ip->i_df.if_format != XFS_DINODE_FMT_BTREE &&
2414 		     ip->i_df.if_format != XFS_DINODE_FMT_LOCAL) ||
2415 		    XFS_TEST_ERROR(mp, XFS_ERRTAG_IFLUSH_4)) {
2416 			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2417 				"%s: Bad directory inode %llu, ptr "PTR_FMT,
2418 				__func__, ip->i_ino, ip);
2419 			goto flush_out;
2420 		}
2421 	}
2422 	if (ip->i_df.if_nextents + xfs_ifork_nextents(&ip->i_af) >
2423 	    ip->i_nblocks || XFS_TEST_ERROR(mp, XFS_ERRTAG_IFLUSH_5)) {
2424 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2425 			"%s: detected corrupt incore inode %llu, "
2426 			"total extents = %llu nblocks = %lld, ptr "PTR_FMT,
2427 			__func__, ip->i_ino,
2428 			ip->i_df.if_nextents + xfs_ifork_nextents(&ip->i_af),
2429 			ip->i_nblocks, ip);
2430 		goto flush_out;
2431 	}
2432 	if (ip->i_forkoff > mp->m_sb.sb_inodesize ||
2433 	    XFS_TEST_ERROR(mp, XFS_ERRTAG_IFLUSH_6)) {
2434 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2435 			"%s: bad inode %llu, forkoff 0x%x, ptr "PTR_FMT,
2436 			__func__, ip->i_ino, ip->i_forkoff, ip);
2437 		goto flush_out;
2438 	}
2439 
2440 	if (xfs_inode_has_attr_fork(ip) &&
2441 	    ip->i_af.if_format == XFS_DINODE_FMT_META_BTREE) {
2442 		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
2443 			"%s: meta btree in inode %Lu attr fork, ptr "PTR_FMT,
2444 			__func__, ip->i_ino, ip);
2445 		goto flush_out;
2446 	}
2447 
2448 	/*
2449 	 * Inode item log recovery for v2 inodes is dependent on the flushiter
2450 	 * count for correct sequencing.  We bump the flush iteration count so
2451 	 * we can detect flushes which postdate a log record during recovery.
2452 	 * This is redundant as we now log every change and hence this can't
2453 	 * happen, but we still need to do it to ensure backwards compatibility
2454 	 * with old kernels that predate logging all inode changes.
2455 	 */
2456 	if (!xfs_has_v3inodes(mp))
2457 		ip->i_flushiter++;
2458 
2459 	/*
2460 	 * If there are inline format data / attr forks attached to this inode,
2461 	 * make sure they are not corrupt.
2462 	 */
2463 	if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL &&
2464 	    xfs_ifork_verify_local_data(ip))
2465 		goto flush_out;
2466 	if (xfs_inode_has_attr_fork(ip) &&
2467 	    ip->i_af.if_format == XFS_DINODE_FMT_LOCAL &&
2468 	    xfs_ifork_verify_local_attr(ip))
2469 		goto flush_out;
2470 
2471 	/*
2472 	 * Copy the dirty parts of the inode into the on-disk inode.  We always
2473 	 * copy out the core of the inode, because if the inode is dirty at all
2474 	 * the core must be.
2475 	 */
2476 	xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
2477 
2478 	/* Wrap, we never let the log put out DI_MAX_FLUSH */
2479 	if (!xfs_has_v3inodes(mp)) {
2480 		if (ip->i_flushiter == DI_MAX_FLUSH)
2481 			ip->i_flushiter = 0;
2482 	}
2483 
2484 	xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
2485 	if (xfs_inode_has_attr_fork(ip))
2486 		xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
2487 
2488 	/*
2489 	 * We've recorded everything logged in the inode, so we'd like to clear
2490 	 * the ili_fields bits so we don't log and flush things unnecessarily.
2491 	 * However, we can't stop logging all this information until the data
2492 	 * we've copied into the disk buffer is written to disk.  If we did we
2493 	 * might overwrite the copy of the inode in the log with all the data
2494 	 * after re-logging only part of it, and in the face of a crash we
2495 	 * wouldn't have all the data we need to recover.
2496 	 *
2497 	 * What we do is move the bits to the ili_last_fields field.  When
2498 	 * logging the inode, these bits are moved back to the ili_fields field.
2499 	 * In the xfs_buf_inode_iodone() routine we clear ili_last_fields, since
2500 	 * we know that the information those bits represent is permanently on
2501 	 * disk.  As long as the flush completes before the inode is logged
2502 	 * again, then both ili_fields and ili_last_fields will be cleared.
2503 	 */
2504 	error = 0;
2505 flush_out:
2506 	spin_lock(&iip->ili_lock);
2507 	iip->ili_last_fields = iip->ili_fields;
2508 	iip->ili_fields = 0;
2509 	set_bit(XFS_LI_FLUSHING, &iip->ili_item.li_flags);
2510 	spin_unlock(&iip->ili_lock);
2511 
2512 	/*
2513 	 * Store the current LSN of the inode so that we can tell whether the
2514 	 * item has moved in the AIL from xfs_buf_inode_iodone().
2515 	 */
2516 	xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
2517 				&iip->ili_item.li_lsn);
2518 
2519 	/* generate the checksum. */
2520 	xfs_dinode_calc_crc(mp, dip);
2521 	if (error)
2522 		xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE);
2523 	return error;
2524 }
2525 
2526 /*
2527  * Non-blocking flush of dirty inode metadata into the backing buffer.
2528  *
2529  * The caller must have a reference to the inode and hold the cluster buffer
2530  * locked. The function walks all the in-memory inodes attached to the cluster
2531  * buffer, flushing to that buffer every inode it can lock without blocking.
2532  *
2533  * On successful flushing of at least one inode, the caller must write out the
2534  * buffer and release it. If no inodes are flushed, -EAGAIN will be returned and
2535  * the caller needs to release the buffer. On failure, the filesystem will be
2536  * shut down, the buffer will have been unlocked and released, and EFSCORRUPTED
2537  * will be returned.
2538  */
2539 int
2540 xfs_iflush_cluster(
2541 	struct xfs_buf		*bp)
2542 {
2543 	struct xfs_mount	*mp = bp->b_mount;
2544 	struct xfs_log_item	*lip, *n;
2545 	struct xfs_inode	*ip;
2546 	struct xfs_inode_log_item *iip;
2547 	int			clcount = 0;
2548 	int			error = 0;
2549 
2550 	/*
2551 	 * We must use the safe variant here as on shutdown xfs_iflush_abort()
2552 	 * will remove itself from the list.
2553 	 */
2554 	list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
2555 		iip = (struct xfs_inode_log_item *)lip;
2556 		ip = iip->ili_inode;
2557 
2558 		/*
2559 		 * Quick and dirty check to avoid locks if possible.
2560 		 */
2561 		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING))
2562 			continue;
2563 		if (xfs_ipincount(ip))
2564 			continue;
2565 
2566 		/*
2567 		 * The inode is still attached to the buffer, which means it is
2568 		 * dirty but reclaim might try to grab it. Check carefully for
2569 		 * that, and grab the ilock while still holding the i_flags_lock
2570 		 * to guarantee reclaim will not be able to reclaim this inode
2571 		 * once we drop the i_flags_lock.
2572 		 */
2573 		spin_lock(&ip->i_flags_lock);
2574 		ASSERT(!__xfs_iflags_test(ip, XFS_ISTALE));
2575 		if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING)) {
2576 			spin_unlock(&ip->i_flags_lock);
2577 			continue;
2578 		}
2579 
2580 		/*
2581 		 * ILOCK will pin the inode against reclaim and prevent
2582 		 * concurrent transactions modifying the inode while we are
2583 		 * flushing the inode. If we get the lock, set the flushing
2584 		 * state before we drop the i_flags_lock.
2585 		 */
2586 		if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
2587 			spin_unlock(&ip->i_flags_lock);
2588 			continue;
2589 		}
2590 		__xfs_iflags_set(ip, XFS_IFLUSHING);
2591 		spin_unlock(&ip->i_flags_lock);
2592 
2593 		/*
2594 		 * Abort flushing this inode if we are shut down because the
2595 		 * inode may not currently be in the AIL. This can occur when
2596 		 * a log I/O failure unpins the inode without inserting it into the
2597 		 * AIL, leaving a dirty/unpinned inode attached to the buffer
2598 		 * that otherwise looks like it should be flushed.
2599 		 */
2600 		if (xlog_is_shutdown(mp->m_log)) {
2601 			xfs_iunpin_wait(ip);
2602 			xfs_iflush_abort(ip);
2603 			xfs_iunlock(ip, XFS_ILOCK_SHARED);
2604 			error = -EIO;
2605 			continue;
2606 		}
2607 
2608 		/* don't block waiting on a log force to unpin dirty inodes */
2609 		if (xfs_ipincount(ip)) {
2610 			xfs_iflags_clear(ip, XFS_IFLUSHING);
2611 			xfs_iunlock(ip, XFS_ILOCK_SHARED);
2612 			continue;
2613 		}
2614 
2615 		if (!xfs_inode_clean(ip))
2616 			error = xfs_iflush(ip, bp);
2617 		else
2618 			xfs_iflags_clear(ip, XFS_IFLUSHING);
2619 		xfs_iunlock(ip, XFS_ILOCK_SHARED);
2620 		if (error)
2621 			break;
2622 		clcount++;
2623 	}
2624 
2625 	if (error) {
2626 		/*
2627 		 * Shutdown first so we kill the log before we release this
2628 		 * buffer. If it is an INODE_ALLOC buffer and pins the tail
2629 		 * of the log, failing it before the _log_ is shut down can
2630 		 * result in the log tail being moved forward in the journal
2631 		 * on disk because log writes can still be taking place. Hence
2632 		 * unpinning the tail will allow the ICREATE intent to be
2633 		 * removed from the log and recovery will fail with uninitialised
2634 		 * inode cluster buffers.
2635 		 */
2636 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
2637 		bp->b_flags |= XBF_ASYNC;
2638 		xfs_buf_ioend_fail(bp);
2639 		return error;
2640 	}
2641 
2642 	if (!clcount)
2643 		return -EAGAIN;
2644 
2645 	XFS_STATS_INC(mp, xs_icluster_flushcnt);
2646 	XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
2647 	return 0;
2648 
2649 }
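
/*
 * Illustrative sketch (not part of the original file): a caller honouring
 * the xfs_iflush_cluster() contract described above.  The real caller is
 * the inode log item push code; the helper name and delwri queueing
 * details here are simplified.
 */
static inline int
xfs_iflush_cluster_example(
	struct xfs_buf		*bp,
	struct list_head	*buffer_list)
{
	int			error;

	error = xfs_iflush_cluster(bp);
	switch (error) {
	case 0:
		/* At least one inode was flushed: queue the buffer write. */
		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);
		return 0;
	case -EAGAIN:
		/* Nothing flushed this time: just release the buffer. */
		xfs_buf_relse(bp);
		return 0;
	default:
		/* Shut down; xfs_iflush_cluster() released the buffer. */
		return error;
	}
}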
2650 
2651 /* Release an inode. */
2652 void
2653 xfs_irele(
2654 	struct xfs_inode	*ip)
2655 {
2656 	trace_xfs_irele(ip, _RET_IP_);
2657 	iput(VFS_I(ip));
2658 }
2659 
2660 /*
2661  * Ensure all committed transactions touching the inode are written to the log.
2662  */
2663 int
2664 xfs_log_force_inode(
2665 	struct xfs_inode	*ip)
2666 {
2667 	struct xfs_inode_log_item *iip = ip->i_itemp;
2668 	xfs_csn_t		seq = 0;
2669 
2670 	if (!iip)
2671 		return 0;
2672 
2673 	spin_lock(&iip->ili_lock);
2674 	seq = iip->ili_commit_seq;
2675 	spin_unlock(&iip->ili_lock);
2676 
2677 	if (!seq)
2678 		return 0;
2679 	return xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC, NULL);
2680 }
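
/*
 * Illustrative sketch (not part of the original file): fsync-style
 * ordering built on xfs_log_force_inode(), waiting for pinned metadata
 * first.  The helper name is hypothetical; the ILOCK is held across the
 * unpin wait as xfs_iunpin() requires.
 */
static inline int
xfs_log_force_inode_example(
	struct xfs_inode	*ip)
{
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	xfs_iunpin_wait(ip);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	/* Push all committed changes for this inode to stable storage. */
	return xfs_log_force_inode(ip);
}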
2681 
2682 /*
2683  * Grab the exclusive iolock for a data copy from src to dest, making sure to
2684  * abide vfs locking order (lowest pointer value goes first) and breaking the
2685  * layout leases before proceeding.  The loop is needed because we cannot call
2686  * the blocking break_layout() with the iolocks held, and therefore have to
2687  * back out both locks.
2688  */
2689 static int
2690 xfs_iolock_two_inodes_and_break_layout(
2691 	struct inode		*src,
2692 	struct inode		*dest)
2693 {
2694 	int			error;
2695 
2696 	if (src > dest)
2697 		swap(src, dest);
2698 
2699 retry:
2700 	/* Wait to break both inodes' layouts before we start locking. */
2701 	error = break_layout(src, true);
2702 	if (error)
2703 		return error;
2704 	if (src != dest) {
2705 		error = break_layout(dest, true);
2706 		if (error)
2707 			return error;
2708 	}
2709 
2710 	/* Lock one inode and make sure nobody got in and leased it. */
2711 	inode_lock(src);
2712 	error = break_layout(src, false);
2713 	if (error) {
2714 		inode_unlock(src);
2715 		if (error == -EWOULDBLOCK)
2716 			goto retry;
2717 		return error;
2718 	}
2719 
2720 	if (src == dest)
2721 		return 0;
2722 
2723 	/* Lock the other inode and make sure nobody got in and leased it. */
2724 	inode_lock_nested(dest, I_MUTEX_NONDIR2);
2725 	error = break_layout(dest, false);
2726 	if (error) {
2727 		inode_unlock(src);
2728 		inode_unlock(dest);
2729 		if (error == -EWOULDBLOCK)
2730 			goto retry;
2731 		return error;
2732 	}
2733 
2734 	return 0;
2735 }
2736 
2737 static int
2738 xfs_mmaplock_two_inodes_and_break_dax_layout(
2739 	struct xfs_inode	*ip1,
2740 	struct xfs_inode	*ip2)
2741 {
2742 	int			error;
2743 
2744 	if (ip1->i_ino > ip2->i_ino)
2745 		swap(ip1, ip2);
2746 
2747 again:
2748 	/* Lock the first inode */
2749 	xfs_ilock(ip1, XFS_MMAPLOCK_EXCL);
2750 	error = xfs_break_dax_layouts(VFS_I(ip1));
2751 	if (error) {
2752 		xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
2753 		return error;
2754 	}
2755 
2756 	if (ip1 == ip2)
2757 		return 0;
2758 
2759 	/* Nested lock the second inode */
2760 	xfs_ilock(ip2, xfs_lock_inumorder(XFS_MMAPLOCK_EXCL, 1));
2761 	/*
2762 	 * We cannot use xfs_break_dax_layouts() directly here because it may
2763 	 * need to unlock & lock the XFS_MMAPLOCK_EXCL which is not suitable
2764 	 * for this nested lock case.
2765 	 */
2766 	error = dax_break_layout(VFS_I(ip2), 0, -1, NULL);
2767 	if (error) {
2768 		xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
2769 		xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
2770 		goto again;
2771 	}
2772 
2773 	return 0;
2774 }
2775 
2776 /*
2777  * Lock two inodes so that userspace cannot initiate I/O via file syscalls or
2778  * mmap activity.
2779  */
2780 int
2781 xfs_ilock2_io_mmap(
2782 	struct xfs_inode	*ip1,
2783 	struct xfs_inode	*ip2)
2784 {
2785 	int			ret;
2786 
2787 	ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2));
2788 	if (ret)
2789 		return ret;
2790 
2791 	if (IS_DAX(VFS_I(ip1)) && IS_DAX(VFS_I(ip2))) {
2792 		ret = xfs_mmaplock_two_inodes_and_break_dax_layout(ip1, ip2);
2793 		if (ret) {
2794 			inode_unlock(VFS_I(ip2));
2795 			if (ip1 != ip2)
2796 				inode_unlock(VFS_I(ip1));
2797 			return ret;
2798 		}
2799 	} else
2800 		filemap_invalidate_lock_two(VFS_I(ip1)->i_mapping,
2801 					    VFS_I(ip2)->i_mapping);
2802 
2803 	return 0;
2804 }
2805 
2806 /* Unlock both inodes to allow IO and mmap activity. */
2807 void
2808 xfs_iunlock2_io_mmap(
2809 	struct xfs_inode	*ip1,
2810 	struct xfs_inode	*ip2)
2811 {
2812 	if (IS_DAX(VFS_I(ip1)) && IS_DAX(VFS_I(ip2))) {
2813 		xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
2814 		if (ip1 != ip2)
2815 			xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL);
2816 	} else
2817 		filemap_invalidate_unlock_two(VFS_I(ip1)->i_mapping,
2818 					      VFS_I(ip2)->i_mapping);
2819 
2820 	inode_unlock(VFS_I(ip2));
2821 	if (ip1 != ip2)
2822 		inode_unlock(VFS_I(ip1));
2823 }
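
/*
 * Illustrative sketch (not part of the original file): the lock/unlock
 * pairing for a two-file operation such as a reflink remap.  The actual
 * work is elided and the callback-based helper is hypothetical.
 */
static inline int
xfs_two_file_io_example(
	struct xfs_inode	*ip1,
	struct xfs_inode	*ip2,
	int			(*op)(struct xfs_inode *, struct xfs_inode *))
{
	int			error;

	/* Stop file I/O, mmap faults and layout leases on both files. */
	error = xfs_ilock2_io_mmap(ip1, ip2);
	if (error)
		return error;

	error = op(ip1, ip2);

	xfs_iunlock2_io_mmap(ip1, ip2);
	return error;
}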
2824 
2825 /* Drop the MMAPLOCK and the IOLOCK after a remap completes. */
2826 void
2827 xfs_iunlock2_remapping(
2828 	struct xfs_inode	*ip1,
2829 	struct xfs_inode	*ip2)
2830 {
2831 	xfs_iflags_clear(ip1, XFS_IREMAPPING);
2832 
2833 	if (ip1 != ip2)
2834 		xfs_iunlock(ip1, XFS_MMAPLOCK_SHARED);
2835 	xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL);
2836 
2837 	if (ip1 != ip2)
2838 		inode_unlock_shared(VFS_I(ip1));
2839 	inode_unlock(VFS_I(ip2));
2840 }
2841 
2842 /*
2843  * Reload the incore unlinked inode list for this inode.  Caller should ensure that
2844  * the link count cannot change, either by taking ILOCK_SHARED or otherwise
2845  * preventing other threads from executing.
2846  */
2847 int
2848 xfs_inode_reload_unlinked_bucket(
2849 	struct xfs_trans	*tp,
2850 	struct xfs_inode	*ip)
2851 {
2852 	struct xfs_mount	*mp = tp->t_mountp;
2853 	struct xfs_buf		*agibp;
2854 	struct xfs_agi		*agi;
2855 	struct xfs_perag	*pag;
2856 	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
2857 	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2858 	xfs_agino_t		prev_agino, next_agino;
2859 	unsigned int		bucket;
2860 	bool			foundit = false;
2861 	int			error;
2862 
2863 	/* Grab the first inode in the list */
2864 	pag = xfs_perag_get(mp, agno);
2865 	error = xfs_ialloc_read_agi(pag, tp, 0, &agibp);
2866 	xfs_perag_put(pag);
2867 	if (error)
2868 		return error;
2869 
2870 	/*
2871 	 * We've taken ILOCK_SHARED and the AGI buffer lock to stabilize the
2872 	 * incore unlinked list pointers for this inode.  Check once more to
2873 	 * see if we raced with anyone else to reload the unlinked list.
2874 	 */
2875 	if (!xfs_inode_unlinked_incomplete(ip)) {
2876 		foundit = true;
2877 		goto out_agibp;
2878 	}
2879 
2880 	bucket = agino % XFS_AGI_UNLINKED_BUCKETS;
2881 	agi = agibp->b_addr;
2882 
2883 	trace_xfs_inode_reload_unlinked_bucket(ip);
2884 
2885 	xfs_info_ratelimited(mp,
2886  "Found unrecovered unlinked inode 0x%x in AG 0x%x.  Initiating list recovery.",
2887 			agino, agno);
2888 
2889 	prev_agino = NULLAGINO;
2890 	next_agino = be32_to_cpu(agi->agi_unlinked[bucket]);
2891 	while (next_agino != NULLAGINO) {
2892 		struct xfs_inode	*next_ip = NULL;
2893 
2894 		/* Found this caller's inode, set its backlink. */
2895 		if (next_agino == agino) {
2896 			next_ip = ip;
2897 			next_ip->i_prev_unlinked = prev_agino;
2898 			foundit = true;
2899 			goto next_inode;
2900 		}
2901 
2902 		/* Try in-memory lookup first. */
2903 		next_ip = xfs_iunlink_lookup(pag, next_agino);
2904 		if (next_ip)
2905 			goto next_inode;
2906 
2907 		/* Inode not in memory, try reloading it. */
2908 		error = xfs_iunlink_reload_next(tp, agibp, prev_agino,
2909 				next_agino);
2910 		if (error)
2911 			break;
2912 
2913 		/* Grab the reloaded inode. */
2914 		next_ip = xfs_iunlink_lookup(pag, next_agino);
2915 		if (!next_ip) {
2916 			/* No incore inode at all?  We reloaded it... */
2917 			ASSERT(next_ip != NULL);
2918 			error = -EFSCORRUPTED;
2919 			break;
2920 		}
2921 
2922 next_inode:
2923 		prev_agino = next_agino;
2924 		next_agino = next_ip->i_next_unlinked;
2925 	}
2926 
2927 out_agibp:
2928 	xfs_trans_brelse(tp, agibp);
2929 	/* Should have found this inode somewhere in the iunlinked bucket. */
2930 	if (!error && !foundit)
2931 		error = -EFSCORRUPTED;
2932 	return error;
2933 }
2934 
2935 /* Decide if this inode is missing its unlinked list and reload it. */
2936 int
2937 xfs_inode_reload_unlinked(
2938 	struct xfs_inode	*ip)
2939 {
2940 	struct xfs_trans	*tp;
2941 	int			error = 0;
2942 
2943 	tp = xfs_trans_alloc_empty(ip->i_mount);
2944 	xfs_ilock(ip, XFS_ILOCK_SHARED);
2945 	if (xfs_inode_unlinked_incomplete(ip))
2946 		error = xfs_inode_reload_unlinked_bucket(tp, ip);
2947 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
2948 	xfs_trans_cancel(tp);
2949 
2950 	return error;
2951 }
2952 
2953 /* Has this inode fork been zapped by repair? */
2954 bool
2955 xfs_ifork_zapped(
2956 	const struct xfs_inode	*ip,
2957 	int			whichfork)
2958 {
2959 	unsigned int		datamask = 0;
2960 
2961 	switch (whichfork) {
2962 	case XFS_DATA_FORK:
2963 		switch (ip->i_vnode.i_mode & S_IFMT) {
2964 		case S_IFDIR:
2965 			datamask = XFS_SICK_INO_DIR_ZAPPED;
2966 			break;
2967 		case S_IFLNK:
2968 			datamask = XFS_SICK_INO_SYMLINK_ZAPPED;
2969 			break;
2970 		}
2971 		return ip->i_sick & (XFS_SICK_INO_BMBTD_ZAPPED | datamask);
2972 	case XFS_ATTR_FORK:
2973 		return ip->i_sick & XFS_SICK_INO_BMBTA_ZAPPED;
2974 	default:
2975 		return false;
2976 	}
2977 }
2978 
2979 /* Compute the number of data and realtime blocks used by a file. */
2980 void
2981 xfs_inode_count_blocks(
2982 	struct xfs_trans	*tp,
2983 	struct xfs_inode	*ip,
2984 	xfs_filblks_t		*dblocks,
2985 	xfs_filblks_t		*rblocks)
2986 {
2987 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
2988 
2989 	*rblocks = 0;
2990 	if (XFS_IS_REALTIME_INODE(ip))
2991 		xfs_bmap_count_leaves(ifp, rblocks);
2992 	*dblocks = ip->i_nblocks - *rblocks;
2993 }
2994 
2995 static void
2996 xfs_wait_dax_page(
2997 	struct inode		*inode)
2998 {
2999 	struct xfs_inode	*ip = XFS_I(inode);
3000 
3001 	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
3002 	schedule();
3003 	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
3004 }
3005 
3006 int
3007 xfs_break_dax_layouts(
3008 	struct inode		*inode)
3009 {
3010 	xfs_assert_ilocked(XFS_I(inode), XFS_MMAPLOCK_EXCL);
3011 
3012 	return dax_break_layout_inode(inode, xfs_wait_dax_page);
3013 }
3014 
3015 int
3016 xfs_break_layouts(
3017 	struct inode		*inode,
3018 	uint			*iolock,
3019 	enum layout_break_reason reason)
3020 {
3021 	bool			retry;
3022 	int			error;
3023 
3024 	xfs_assert_ilocked(XFS_I(inode), XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL);
3025 
3026 	do {
3027 		retry = false;
3028 		switch (reason) {
3029 		case BREAK_UNMAP:
3030 			error = xfs_break_dax_layouts(inode);
3031 			if (error)
3032 				break;
3033 			fallthrough;
3034 		case BREAK_WRITE:
3035 			error = xfs_break_leased_layouts(inode, iolock, &retry);
3036 			break;
3037 		default:
3038 			WARN_ON_ONCE(1);
3039 			error = -EINVAL;
3040 		}
3041 	} while (error == 0 && retry);
3042 
3043 	return error;
3044 }
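
/*
 * Illustrative sketch (not part of the original file): the usual caller
 * pattern for xfs_break_layouts() ahead of an operation that invalidates
 * mappings, loosely modelled on the fallocate path.  The actual work is
 * elided and the helper name is hypothetical.
 */
static inline int
xfs_break_layouts_example(
	struct xfs_inode	*ip)
{
	uint			iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
	int			error;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(VFS_I(ip), &iolock, BREAK_UNMAP);
	if (error)
		goto out_unlock;

	/* ... modify the file's mappings here ... */

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}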
3045 
3046 /* Returns the size of fundamental allocation unit for a file, in bytes. */
3047 unsigned int
3048 xfs_inode_alloc_unitsize(
3049 	struct xfs_inode	*ip)
3050 {
3051 	unsigned int		blocks = 1;
3052 
3053 	if (XFS_IS_REALTIME_INODE(ip))
3054 		blocks = ip->i_mount->m_sb.sb_rextsize;
3055 
3056 	return XFS_FSB_TO_B(ip->i_mount, blocks);
3057 }
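
/*
 * Illustrative sketch (not part of the original file): rounding a byte
 * range out to the file's allocation unit, e.g. before zeroing or
 * punching.  roundup()/rounddown() are used rather than the power-of-two
 * variants because the realtime extent size need not be a power of two.
 * The helper name is hypothetical.
 */
static inline void
xfs_round_to_alloc_unit_example(
	struct xfs_inode	*ip,
	xfs_off_t		*start,
	xfs_off_t		*end)
{
	unsigned int		alloc_unit = xfs_inode_alloc_unitsize(ip);

	*start = rounddown(*start, alloc_unit);
	*end = roundup(*end, alloc_unit);
}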
3058 
3059 /* Should we always be using copy on write for file writes? */
3060 bool
3061 xfs_is_always_cow_inode(
3062 	const struct xfs_inode	*ip)
3063 {
3064 	return xfs_is_zoned_inode(ip) ||
3065 		(ip->i_mount->m_always_cow && xfs_has_reflink(ip->i_mount));
3066 }
3067