xref: /linux/fs/xfs/xfs_inode.c (revision 6e8331ac6973435b1e7604c30f2ad394035b46e1)
1 /*
2  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_types.h"
21 #include "xfs_bit.h"
22 #include "xfs_log.h"
23 #include "xfs_inum.h"
24 #include "xfs_imap.h"
25 #include "xfs_trans.h"
26 #include "xfs_trans_priv.h"
27 #include "xfs_sb.h"
28 #include "xfs_ag.h"
29 #include "xfs_dir2.h"
30 #include "xfs_dmapi.h"
31 #include "xfs_mount.h"
32 #include "xfs_bmap_btree.h"
33 #include "xfs_alloc_btree.h"
34 #include "xfs_ialloc_btree.h"
35 #include "xfs_dir2_sf.h"
36 #include "xfs_attr_sf.h"
37 #include "xfs_dinode.h"
38 #include "xfs_inode.h"
39 #include "xfs_buf_item.h"
40 #include "xfs_inode_item.h"
41 #include "xfs_btree.h"
42 #include "xfs_alloc.h"
43 #include "xfs_ialloc.h"
44 #include "xfs_bmap.h"
45 #include "xfs_rw.h"
46 #include "xfs_error.h"
47 #include "xfs_utils.h"
48 #include "xfs_dir2_trace.h"
49 #include "xfs_quota.h"
50 #include "xfs_mac.h"
51 #include "xfs_acl.h"
52 
53 
54 kmem_zone_t *xfs_ifork_zone;
55 kmem_zone_t *xfs_inode_zone;
56 kmem_zone_t *xfs_chashlist_zone;
57 
58 /*
59  * Used in xfs_itruncate_finish().  This is the maximum number of extents
60  * freed from a file in a single transaction.
61  */
62 #define	XFS_ITRUNC_MAX_EXTENTS	2
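/*
 * xfs_itruncate_finish() enforces this limit by looping: each pass calls
 * xfs_bunmapi() for at most XFS_ITRUNC_MAX_EXTENTS extents and then
 * commits the transaction via xfs_bmap_finish() before unmapping more.
 */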
63 
64 STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);
65 STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int);
66 STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int);
67 STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);
68 
69 
70 #ifdef DEBUG
71 /*
72  * Make sure that the extents in the given memory buffer
73  * are valid.
74  */
75 STATIC void
76 xfs_validate_extents(
77 	xfs_ifork_t		*ifp,
78 	int			nrecs,
79 	int			disk,
80 	xfs_exntfmt_t		fmt)
81 {
82 	xfs_bmbt_rec_t		*ep;
83 	xfs_bmbt_irec_t		irec;
84 	xfs_bmbt_rec_t		rec;
85 	int			i;
86 
87 	for (i = 0; i < nrecs; i++) {
88 		ep = xfs_iext_get_ext(ifp, i);
89 		rec.l0 = get_unaligned((__uint64_t*)&ep->l0);
90 		rec.l1 = get_unaligned((__uint64_t*)&ep->l1);
91 		if (disk)
92 			xfs_bmbt_disk_get_all(&rec, &irec);
93 		else
94 			xfs_bmbt_get_all(&rec, &irec);
95 		if (fmt == XFS_EXTFMT_NOSTATE)
96 			ASSERT(irec.br_state == XFS_EXT_NORM);
97 	}
98 }
99 #else /* DEBUG */
100 #define xfs_validate_extents(ifp, nrecs, disk, fmt)
101 #endif /* DEBUG */
102 
103 /*
104  * Check that none of the inodes in the buffer have a next
105  * unlinked field of 0.
106  */
107 #if defined(DEBUG)
108 void
109 xfs_inobp_check(
110 	xfs_mount_t	*mp,
111 	xfs_buf_t	*bp)
112 {
113 	int		i;
114 	int		j;
115 	xfs_dinode_t	*dip;
116 
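	/*
	 * j is the number of on-disk inodes held in this cluster buffer:
	 * the cluster size divided by the inode size (for example, a
	 * typical 8192-byte cluster of 256-byte inodes gives 32).
	 */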
117 	j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
118 
119 	for (i = 0; i < j; i++) {
120 		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
121 					i * mp->m_sb.sb_inodesize);
122 		if (!dip->di_next_unlinked)  {
123 			xfs_fs_cmn_err(CE_ALERT, mp,
124 				"Detected a bogus zero next_unlinked field in incore inode buffer 0x%p.  About to pop an ASSERT.",
125 				bp);
126 			ASSERT(dip->di_next_unlinked);
127 		}
128 	}
129 }
130 #endif
131 
132 /*
133  * This routine is called to map an inode number within a file
134  * system to the buffer containing the on-disk version of the
135  * inode.  It returns a pointer to the buffer containing the
136  * on-disk inode in the bpp parameter, and in the dip parameter
137  * it returns a pointer to the on-disk inode within that buffer.
138  *
139  * If a non-zero error is returned, then the contents of bpp and
140  * dipp are undefined.
141  *
142  * Use xfs_imap() to determine the size and location of the
143  * buffer to read from disk.
144  */
145 STATIC int
146 xfs_inotobp(
147 	xfs_mount_t	*mp,
148 	xfs_trans_t	*tp,
149 	xfs_ino_t	ino,
150 	xfs_dinode_t	**dipp,
151 	xfs_buf_t	**bpp,
152 	int		*offset)
153 {
154 	int		di_ok;
155 	xfs_imap_t	imap;
156 	xfs_buf_t	*bp;
157 	int		error;
158 	xfs_dinode_t	*dip;
159 
160 	/*
161 	 * Call the space management code to find the location of the
162 	 * inode on disk.
163 	 */
164 	imap.im_blkno = 0;
165 	error = xfs_imap(mp, tp, ino, &imap, XFS_IMAP_LOOKUP);
166 	if (error != 0) {
167 		cmn_err(CE_WARN,
168 	"xfs_inotobp: xfs_imap()  returned an "
169 	"error %d on %s.  Returning error.", error, mp->m_fsname);
170 		return error;
171 	}
172 
173 	/*
174 	 * If the inode number maps to a block outside the bounds of the
175 	 * file system then return an error rather than calling read_buf()
176 	 * and panicking when we get an error from the driver.
177 	 */
178 	if ((imap.im_blkno + imap.im_len) >
179 	    XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
180 		cmn_err(CE_WARN,
181 	"xfs_inotobp: inode number (%llu + %d) maps to a block outside the bounds "
182 	"of the file system %s.  Returning EINVAL.",
183 			(unsigned long long)imap.im_blkno,
184 			imap.im_len, mp->m_fsname);
185 		return XFS_ERROR(EINVAL);
186 	}
187 
188 	/*
189 	 * Read in the buffer.  If tp is NULL, xfs_trans_read_buf() will
190 	 * default to just a read_buf() call.
191 	 */
192 	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap.im_blkno,
193 				   (int)imap.im_len, XFS_BUF_LOCK, &bp);
194 
195 	if (error) {
196 		cmn_err(CE_WARN,
197 	"xfs_inotobp: xfs_trans_read_buf()  returned an "
198 	"error %d on %s.  Returning error.", error, mp->m_fsname);
199 		return error;
200 	}
201 	dip = (xfs_dinode_t *)xfs_buf_offset(bp, 0);
202 	di_ok =
203 		INT_GET(dip->di_core.di_magic, ARCH_CONVERT) == XFS_DINODE_MAGIC &&
204 		XFS_DINODE_GOOD_VERSION(INT_GET(dip->di_core.di_version, ARCH_CONVERT));
205 	if (unlikely(XFS_TEST_ERROR(!di_ok, mp, XFS_ERRTAG_ITOBP_INOTOBP,
206 			XFS_RANDOM_ITOBP_INOTOBP))) {
207 		XFS_CORRUPTION_ERROR("xfs_inotobp", XFS_ERRLEVEL_LOW, mp, dip);
208 		xfs_trans_brelse(tp, bp);
209 		cmn_err(CE_WARN,
210 	"xfs_inotobp: XFS_TEST_ERROR()  returned an "
211 	"error on %s.  Returning EFSCORRUPTED.",  mp->m_fsname);
212 		return XFS_ERROR(EFSCORRUPTED);
213 	}
214 
215 	xfs_inobp_check(mp, bp);
216 
217 	/*
218 	 * Set *dipp to point to the on-disk inode in the buffer.
219 	 */
220 	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
221 	*bpp = bp;
222 	*offset = imap.im_boffset;
223 	return 0;
224 }
225 
226 
227 /*
228  * This routine is called to map an inode to the buffer containing
229  * the on-disk version of the inode.  It returns a pointer to the
230  * buffer containing the on-disk inode in the bpp parameter, and in
231  * the dip parameter it returns a pointer to the on-disk inode within
232  * that buffer.
233  *
234  * If a non-zero error is returned, then the contents of bpp and
235  * dipp are undefined.
236  *
237  * If the inode is new and has not yet been initialized, use xfs_imap()
238  * to determine the size and location of the buffer to read from disk.
239  * If the inode has already been mapped to its buffer and read in once,
240  * then use the mapping information stored in the inode rather than
241  * calling xfs_imap().  This allows us to avoid the overhead of looking
242  * at the inode btree for small block file systems (see xfs_dilocate()).
243  * We can tell whether the inode has been mapped in before by comparing
244  * its disk block address to 0.  Only uninitialized inodes will have
245  * 0 for the disk block address.
246  */
247 int
248 xfs_itobp(
249 	xfs_mount_t	*mp,
250 	xfs_trans_t	*tp,
251 	xfs_inode_t	*ip,
252 	xfs_dinode_t	**dipp,
253 	xfs_buf_t	**bpp,
254 	xfs_daddr_t	bno,
255 	uint		imap_flags)
256 {
257 	xfs_imap_t	imap;
258 	xfs_buf_t	*bp;
259 	int		error;
260 	int		i;
261 	int		ni;
262 
263 	if (ip->i_blkno == (xfs_daddr_t)0) {
264 		/*
265 		 * Call the space management code to find the location of the
266 		 * inode on disk.
267 		 */
268 		imap.im_blkno = bno;
269 		if ((error = xfs_imap(mp, tp, ip->i_ino, &imap,
270 					XFS_IMAP_LOOKUP | imap_flags)))
271 			return error;
272 
273 		/*
274 		 * If the inode number maps to a block outside the bounds
275 		 * of the file system then return an error rather than calling
276 		 * read_buf and panicking when we get an error from the
277 		 * driver.
278 		 */
279 		if ((imap.im_blkno + imap.im_len) >
280 		    XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
281 #ifdef DEBUG
282 			xfs_fs_cmn_err(CE_ALERT, mp, "xfs_itobp: "
283 					"(imap.im_blkno (0x%llx) "
284 					"+ imap.im_len (0x%llx)) > "
285 					" XFS_FSB_TO_BB(mp, "
286 					"mp->m_sb.sb_dblocks) (0x%llx)",
287 					(unsigned long long) imap.im_blkno,
288 					(unsigned long long) imap.im_len,
289 					XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks));
290 #endif /* DEBUG */
291 			return XFS_ERROR(EINVAL);
292 		}
293 
294 		/*
295 		 * Fill in the fields in the inode that will be used to
296 		 * map the inode to its buffer from now on.
297 		 */
298 		ip->i_blkno = imap.im_blkno;
299 		ip->i_len = imap.im_len;
300 		ip->i_boffset = imap.im_boffset;
301 	} else {
302 		/*
303 		 * We've already mapped the inode once, so just use the
304 		 * mapping that we saved the first time.
305 		 */
306 		imap.im_blkno = ip->i_blkno;
307 		imap.im_len = ip->i_len;
308 		imap.im_boffset = ip->i_boffset;
309 	}
310 	ASSERT(bno == 0 || bno == imap.im_blkno);
311 
312 	/*
313 	 * Read in the buffer.  If tp is NULL, xfs_trans_read_buf() will
314 	 * default to just a read_buf() call.
315 	 */
316 	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap.im_blkno,
317 				   (int)imap.im_len, XFS_BUF_LOCK, &bp);
318 	if (error) {
319 #ifdef DEBUG
320 		xfs_fs_cmn_err(CE_ALERT, mp, "xfs_itobp: "
321 				"xfs_trans_read_buf() returned error %d, "
322 				"imap.im_blkno 0x%llx, imap.im_len 0x%llx",
323 				error, (unsigned long long) imap.im_blkno,
324 				(unsigned long long) imap.im_len);
325 #endif /* DEBUG */
326 		return error;
327 	}
328 
329 	/*
330 	 * Validate the magic number and version of every inode in the buffer
331 	 * (on DEBUG kernels) or, otherwise, just the first inode in the buffer.
332 	 * No validation is done here in userspace (xfs_repair).
333 	 */
334 #if !defined(__KERNEL__)
335 	ni = 0;
336 #elif defined(DEBUG)
337 	ni = BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog;
338 #else	/* usual case */
339 	ni = 1;
340 #endif
341 
342 	for (i = 0; i < ni; i++) {
343 		int		di_ok;
344 		xfs_dinode_t	*dip;
345 
346 		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
347 					(i << mp->m_sb.sb_inodelog));
348 		di_ok = INT_GET(dip->di_core.di_magic, ARCH_CONVERT) == XFS_DINODE_MAGIC &&
349 			    XFS_DINODE_GOOD_VERSION(INT_GET(dip->di_core.di_version, ARCH_CONVERT));
350 		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
351 						XFS_ERRTAG_ITOBP_INOTOBP,
352 						XFS_RANDOM_ITOBP_INOTOBP))) {
353 			if (imap_flags & XFS_IMAP_BULKSTAT) {
354 				xfs_trans_brelse(tp, bp);
355 				return XFS_ERROR(EINVAL);
356 			}
357 #ifdef DEBUG
358 			cmn_err(CE_ALERT,
359 					"Device %s - bad inode magic/vsn "
360 					"daddr %lld #%d (magic=%x)",
361 				XFS_BUFTARG_NAME(mp->m_ddev_targp),
362 				(unsigned long long)imap.im_blkno, i,
363 				INT_GET(dip->di_core.di_magic, ARCH_CONVERT));
364 #endif
365 			XFS_CORRUPTION_ERROR("xfs_itobp", XFS_ERRLEVEL_HIGH,
366 					     mp, dip);
367 			xfs_trans_brelse(tp, bp);
368 			return XFS_ERROR(EFSCORRUPTED);
369 		}
370 	}
371 
372 	xfs_inobp_check(mp, bp);
373 
374 	/*
375 	 * Mark the buffer as an inode buffer now that it looks good
376 	 */
377 	XFS_BUF_SET_VTYPE(bp, B_FS_INO);
378 
379 	/*
380 	 * Set *dipp to point to the on-disk inode in the buffer.
381 	 */
382 	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
383 	*bpp = bp;
384 	return 0;
385 }
386 
387 /*
388  * Move inode type and inode format specific information from the
389  * on-disk inode to the in-core inode.  For fifos, devs, and sockets
390  * this means setting if_rdev to the proper value.  For files, directories,
391  * and symlinks this means bringing in the in-line data or extent
392  * pointers.  For a file in B-tree format, only the root is immediately
393  * brought in-core.  The rest will be in-lined in if_extents when it
394  * is first referenced (see xfs_iread_extents()).
395  */
396 STATIC int
397 xfs_iformat(
398 	xfs_inode_t		*ip,
399 	xfs_dinode_t		*dip)
400 {
401 	xfs_attr_shortform_t	*atp;
402 	int			size;
403 	int			error;
404 	xfs_fsize_t             di_size;
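	/*
	 * if_ext_max is how many extent records fit directly in the data
	 * fork's share of the on-disk inode: the fork size divided by the
	 * size of one xfs_bmbt_rec_t.
	 */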
405 	ip->i_df.if_ext_max =
406 		XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
407 	error = 0;
408 
409 	if (unlikely(
410 	    INT_GET(dip->di_core.di_nextents, ARCH_CONVERT) +
411 		INT_GET(dip->di_core.di_anextents, ARCH_CONVERT) >
412 	    INT_GET(dip->di_core.di_nblocks, ARCH_CONVERT))) {
413 		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
414 			"corrupt dinode %Lu, extent total = %d, nblocks = %Lu.",
415 			(unsigned long long)ip->i_ino,
416 			(int)(INT_GET(dip->di_core.di_nextents, ARCH_CONVERT)
417 			    + INT_GET(dip->di_core.di_anextents, ARCH_CONVERT)),
418 			(unsigned long long)
419 			INT_GET(dip->di_core.di_nblocks, ARCH_CONVERT));
420 		XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW,
421 				     ip->i_mount, dip);
422 		return XFS_ERROR(EFSCORRUPTED);
423 	}
424 
425 	if (unlikely(INT_GET(dip->di_core.di_forkoff, ARCH_CONVERT) > ip->i_mount->m_sb.sb_inodesize)) {
426 		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
427 			"corrupt dinode %Lu, forkoff = 0x%x.",
428 			(unsigned long long)ip->i_ino,
429 			(int)(INT_GET(dip->di_core.di_forkoff, ARCH_CONVERT)));
430 		XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW,
431 				     ip->i_mount, dip);
432 		return XFS_ERROR(EFSCORRUPTED);
433 	}
434 
435 	switch (ip->i_d.di_mode & S_IFMT) {
436 	case S_IFIFO:
437 	case S_IFCHR:
438 	case S_IFBLK:
439 	case S_IFSOCK:
440 		if (unlikely(INT_GET(dip->di_core.di_format, ARCH_CONVERT) != XFS_DINODE_FMT_DEV)) {
441 			XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW,
442 					      ip->i_mount, dip);
443 			return XFS_ERROR(EFSCORRUPTED);
444 		}
445 		ip->i_d.di_size = 0;
446 		ip->i_df.if_u2.if_rdev = INT_GET(dip->di_u.di_dev, ARCH_CONVERT);
447 		break;
448 
449 	case S_IFREG:
450 	case S_IFLNK:
451 	case S_IFDIR:
452 		switch (INT_GET(dip->di_core.di_format, ARCH_CONVERT)) {
453 		case XFS_DINODE_FMT_LOCAL:
454 			/*
455 			 * no local regular files yet
456 			 */
457 			if (unlikely((INT_GET(dip->di_core.di_mode, ARCH_CONVERT) & S_IFMT) == S_IFREG)) {
458 				xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
459 					"corrupt inode %Lu "
460 					"(local format for regular file).",
461 					(unsigned long long) ip->i_ino);
462 				XFS_CORRUPTION_ERROR("xfs_iformat(4)",
463 						     XFS_ERRLEVEL_LOW,
464 						     ip->i_mount, dip);
465 				return XFS_ERROR(EFSCORRUPTED);
466 			}
467 
468 			di_size = INT_GET(dip->di_core.di_size, ARCH_CONVERT);
469 			if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) {
470 				xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
471 					"corrupt inode %Lu "
472 					"(bad size %Ld for local inode).",
473 					(unsigned long long) ip->i_ino,
474 					(long long) di_size);
475 				XFS_CORRUPTION_ERROR("xfs_iformat(5)",
476 						     XFS_ERRLEVEL_LOW,
477 						     ip->i_mount, dip);
478 				return XFS_ERROR(EFSCORRUPTED);
479 			}
480 
481 			size = (int)di_size;
482 			error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size);
483 			break;
484 		case XFS_DINODE_FMT_EXTENTS:
485 			error = xfs_iformat_extents(ip, dip, XFS_DATA_FORK);
486 			break;
487 		case XFS_DINODE_FMT_BTREE:
488 			error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
489 			break;
490 		default:
491 			XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW,
492 					 ip->i_mount);
493 			return XFS_ERROR(EFSCORRUPTED);
494 		}
495 		break;
496 
497 	default:
498 		XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount);
499 		return XFS_ERROR(EFSCORRUPTED);
500 	}
501 	if (error) {
502 		return error;
503 	}
504 	if (!XFS_DFORK_Q(dip))
505 		return 0;
506 	ASSERT(ip->i_afp == NULL);
507 	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
508 	ip->i_afp->if_ext_max =
509 		XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
510 	switch (INT_GET(dip->di_core.di_aformat, ARCH_CONVERT)) {
511 	case XFS_DINODE_FMT_LOCAL:
512 		atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip);
513 		size = be16_to_cpu(atp->hdr.totsize);
514 		error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size);
515 		break;
516 	case XFS_DINODE_FMT_EXTENTS:
517 		error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK);
518 		break;
519 	case XFS_DINODE_FMT_BTREE:
520 		error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
521 		break;
522 	default:
523 		error = XFS_ERROR(EFSCORRUPTED);
524 		break;
525 	}
526 	if (error) {
527 		kmem_zone_free(xfs_ifork_zone, ip->i_afp);
528 		ip->i_afp = NULL;
529 		xfs_idestroy_fork(ip, XFS_DATA_FORK);
530 	}
531 	return error;
532 }
533 
534 /*
535  * The file is in-lined in the on-disk inode.
536  * If it fits into if_inline_data, then copy
537  * it there, otherwise allocate a buffer for it
538  * and copy the data there.  Either way, set
539  * if_data to point at the data.
540  * If we allocate a buffer for the data, make
541  * sure that its size is a multiple of 4 and
542  * record the real size in if_real_bytes.
543  */
544 STATIC int
545 xfs_iformat_local(
546 	xfs_inode_t	*ip,
547 	xfs_dinode_t	*dip,
548 	int		whichfork,
549 	int		size)
550 {
551 	xfs_ifork_t	*ifp;
552 	int		real_size;
553 
554 	/*
555 	 * If the size is unreasonable, then something
556 	 * is wrong and we just bail out rather than crash in
557 	 * kmem_alloc() or memcpy() below.
558 	 */
559 	if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
560 		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
561 			"corrupt inode %Lu "
562 			"(bad size %d for local fork, size = %d).",
563 			(unsigned long long) ip->i_ino, size,
564 			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
565 		XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW,
566 				     ip->i_mount, dip);
567 		return XFS_ERROR(EFSCORRUPTED);
568 	}
569 	ifp = XFS_IFORK_PTR(ip, whichfork);
570 	real_size = 0;
571 	if (size == 0)
572 		ifp->if_u1.if_data = NULL;
573 	else if (size <= sizeof(ifp->if_u2.if_inline_data))
574 		ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
575 	else {
576 		real_size = roundup(size, 4);
577 		ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
578 	}
579 	ifp->if_bytes = size;
580 	ifp->if_real_bytes = real_size;
581 	if (size)
582 		memcpy(ifp->if_u1.if_data, XFS_DFORK_PTR(dip, whichfork), size);
583 	ifp->if_flags &= ~XFS_IFEXTENTS;
584 	ifp->if_flags |= XFS_IFINLINE;
585 	return 0;
586 }
587 
588 /*
589  * The file consists of a set of extents all
590  * of which fit into the on-disk inode.
591  * If there are few enough extents to fit into
592  * the if_inline_ext, then copy them there.
593  * Otherwise allocate a buffer for them and copy
594  * them into it.  Either way, set if_extents
595  * to point at the extents.
596  */
597 STATIC int
598 xfs_iformat_extents(
599 	xfs_inode_t	*ip,
600 	xfs_dinode_t	*dip,
601 	int		whichfork)
602 {
603 	xfs_bmbt_rec_t	*ep, *dp;
604 	xfs_ifork_t	*ifp;
605 	int		nex;
606 	int		size;
607 	int		i;
608 
609 	ifp = XFS_IFORK_PTR(ip, whichfork);
610 	nex = XFS_DFORK_NEXTENTS(dip, whichfork);
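	/*
	 * Each extent record (xfs_bmbt_rec_t) is two 64-bit words, so the
	 * fork needs nex * 16 bytes; e.g. four extents take 64 bytes.
	 */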
611 	size = nex * (uint)sizeof(xfs_bmbt_rec_t);
612 
613 	/*
614 	 * If the number of extents is unreasonable, then something
615 	 * is wrong and we just bail out rather than crash in
616 	 * kmem_alloc() or memcpy() below.
617 	 */
618 	if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
619 		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
620 			"corrupt inode %Lu ((a)extents = %d).",
621 			(unsigned long long) ip->i_ino, nex);
622 		XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW,
623 				     ip->i_mount, dip);
624 		return XFS_ERROR(EFSCORRUPTED);
625 	}
626 
627 	ifp->if_real_bytes = 0;
628 	if (nex == 0)
629 		ifp->if_u1.if_extents = NULL;
630 	else if (nex <= XFS_INLINE_EXTS)
631 		ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
632 	else
633 		xfs_iext_add(ifp, 0, nex);
634 
635 	ifp->if_bytes = size;
636 	if (size) {
637 		dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork);
638 		xfs_validate_extents(ifp, nex, 1, XFS_EXTFMT_INODE(ip));
639 		for (i = 0; i < nex; i++, dp++) {
640 			ep = xfs_iext_get_ext(ifp, i);
641 			ep->l0 = INT_GET(get_unaligned((__uint64_t*)&dp->l0),
642 								ARCH_CONVERT);
643 			ep->l1 = INT_GET(get_unaligned((__uint64_t*)&dp->l1),
644 								ARCH_CONVERT);
645 		}
646 		xfs_bmap_trace_exlist("xfs_iformat_extents", ip, nex,
647 			whichfork);
648 		if (whichfork != XFS_DATA_FORK ||
649 			XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE)
650 				if (unlikely(xfs_check_nostate_extents(
651 				    ifp, 0, nex))) {
652 					XFS_ERROR_REPORT("xfs_iformat_extents(2)",
653 							 XFS_ERRLEVEL_LOW,
654 							 ip->i_mount);
655 					return XFS_ERROR(EFSCORRUPTED);
656 				}
657 	}
658 	ifp->if_flags |= XFS_IFEXTENTS;
659 	return 0;
660 }
661 
662 /*
663  * The file has too many extents to fit into
664  * the inode, so they are in B-tree format.
665  * Allocate a buffer for the root of the B-tree
666  * and copy the root into it.  The if_extents
667  * field will remain NULL until all of the
668  * extents are read in (when they are needed).
669  */
670 STATIC int
671 xfs_iformat_btree(
672 	xfs_inode_t		*ip,
673 	xfs_dinode_t		*dip,
674 	int			whichfork)
675 {
676 	xfs_bmdr_block_t	*dfp;
677 	xfs_ifork_t		*ifp;
678 	/* REFERENCED */
679 	int			nrecs;
680 	int			size;
681 
682 	ifp = XFS_IFORK_PTR(ip, whichfork);
683 	dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
684 	size = XFS_BMAP_BROOT_SPACE(dfp);
685 	nrecs = XFS_BMAP_BROOT_NUMRECS(dfp);
686 
687 	/*
688  * blow out if -- fork has fewer extents than can fit in
689 	 * fork (fork shouldn't be a btree format), root btree
690 	 * block has more records than can fit into the fork,
691 	 * or the number of extents is greater than the number of
692 	 * blocks.
693 	 */
694 	if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max
695 	    || XFS_BMDR_SPACE_CALC(nrecs) >
696 			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)
697 	    || XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
698 		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
699 			"corrupt inode %Lu (btree).",
700 			(unsigned long long) ip->i_ino);
701 		XFS_ERROR_REPORT("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
702 				 ip->i_mount);
703 		return XFS_ERROR(EFSCORRUPTED);
704 	}
705 
706 	ifp->if_broot_bytes = size;
707 	ifp->if_broot = kmem_alloc(size, KM_SLEEP);
708 	ASSERT(ifp->if_broot != NULL);
709 	/*
710 	 * Copy and convert from the on-disk structure
711 	 * to the in-memory structure.
712 	 */
713 	xfs_bmdr_to_bmbt(dfp, XFS_DFORK_SIZE(dip, ip->i_mount, whichfork),
714 		ifp->if_broot, size);
715 	ifp->if_flags &= ~XFS_IFEXTENTS;
716 	ifp->if_flags |= XFS_IFBROOT;
717 
718 	return 0;
719 }
720 
721 /*
722  * xfs_xlate_dinode_core - translate an xfs_inode_core_t between ondisk
723  * and native format
724  *
725  * buf  = on-disk representation
726  * dip  = native representation
727  * dir  = direction - +ve -> disk to native
728  *                    -ve -> native to disk
729  */
730 void
731 xfs_xlate_dinode_core(
732 	xfs_caddr_t		buf,
733 	xfs_dinode_core_t	*dip,
734 	int			dir)
735 {
736 	xfs_dinode_core_t	*buf_core = (xfs_dinode_core_t *)buf;
737 	xfs_dinode_core_t	*mem_core = (xfs_dinode_core_t *)dip;
738 	xfs_arch_t		arch = ARCH_CONVERT;
739 
740 	ASSERT(dir);
741 
742 	INT_XLATE(buf_core->di_magic, mem_core->di_magic, dir, arch);
743 	INT_XLATE(buf_core->di_mode, mem_core->di_mode, dir, arch);
744 	INT_XLATE(buf_core->di_version,	mem_core->di_version, dir, arch);
745 	INT_XLATE(buf_core->di_format, mem_core->di_format, dir, arch);
746 	INT_XLATE(buf_core->di_onlink, mem_core->di_onlink, dir, arch);
747 	INT_XLATE(buf_core->di_uid, mem_core->di_uid, dir, arch);
748 	INT_XLATE(buf_core->di_gid, mem_core->di_gid, dir, arch);
749 	INT_XLATE(buf_core->di_nlink, mem_core->di_nlink, dir, arch);
750 	INT_XLATE(buf_core->di_projid, mem_core->di_projid, dir, arch);
751 
752 	if (dir > 0) {
753 		memcpy(mem_core->di_pad, buf_core->di_pad,
754 			sizeof(buf_core->di_pad));
755 	} else {
756 		memcpy(buf_core->di_pad, mem_core->di_pad,
757 			sizeof(buf_core->di_pad));
758 	}
759 
760 	INT_XLATE(buf_core->di_flushiter, mem_core->di_flushiter, dir, arch);
761 
762 	INT_XLATE(buf_core->di_atime.t_sec, mem_core->di_atime.t_sec,
763 			dir, arch);
764 	INT_XLATE(buf_core->di_atime.t_nsec, mem_core->di_atime.t_nsec,
765 			dir, arch);
766 	INT_XLATE(buf_core->di_mtime.t_sec, mem_core->di_mtime.t_sec,
767 			dir, arch);
768 	INT_XLATE(buf_core->di_mtime.t_nsec, mem_core->di_mtime.t_nsec,
769 			dir, arch);
770 	INT_XLATE(buf_core->di_ctime.t_sec, mem_core->di_ctime.t_sec,
771 			dir, arch);
772 	INT_XLATE(buf_core->di_ctime.t_nsec, mem_core->di_ctime.t_nsec,
773 			dir, arch);
774 	INT_XLATE(buf_core->di_size, mem_core->di_size, dir, arch);
775 	INT_XLATE(buf_core->di_nblocks, mem_core->di_nblocks, dir, arch);
776 	INT_XLATE(buf_core->di_extsize, mem_core->di_extsize, dir, arch);
777 	INT_XLATE(buf_core->di_nextents, mem_core->di_nextents, dir, arch);
778 	INT_XLATE(buf_core->di_anextents, mem_core->di_anextents, dir, arch);
779 	INT_XLATE(buf_core->di_forkoff, mem_core->di_forkoff, dir, arch);
780 	INT_XLATE(buf_core->di_aformat, mem_core->di_aformat, dir, arch);
781 	INT_XLATE(buf_core->di_dmevmask, mem_core->di_dmevmask, dir, arch);
782 	INT_XLATE(buf_core->di_dmstate, mem_core->di_dmstate, dir, arch);
783 	INT_XLATE(buf_core->di_flags, mem_core->di_flags, dir, arch);
784 	INT_XLATE(buf_core->di_gen, mem_core->di_gen, dir, arch);
785 }
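/*
 * For example, xfs_iread() below calls
 *	xfs_xlate_dinode_core((xfs_caddr_t)&dip->di_core, &(ip->i_d), 1);
 * to translate the on-disk core into native format.  The reverse
 * (dir < 0) direction is used when the in-core inode is converted back
 * for writing to disk.
 */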
786 
787 STATIC uint
788 _xfs_dic2xflags(
789 	__uint16_t		di_flags)
790 {
791 	uint			flags = 0;
792 
793 	if (di_flags & XFS_DIFLAG_ANY) {
794 		if (di_flags & XFS_DIFLAG_REALTIME)
795 			flags |= XFS_XFLAG_REALTIME;
796 		if (di_flags & XFS_DIFLAG_PREALLOC)
797 			flags |= XFS_XFLAG_PREALLOC;
798 		if (di_flags & XFS_DIFLAG_IMMUTABLE)
799 			flags |= XFS_XFLAG_IMMUTABLE;
800 		if (di_flags & XFS_DIFLAG_APPEND)
801 			flags |= XFS_XFLAG_APPEND;
802 		if (di_flags & XFS_DIFLAG_SYNC)
803 			flags |= XFS_XFLAG_SYNC;
804 		if (di_flags & XFS_DIFLAG_NOATIME)
805 			flags |= XFS_XFLAG_NOATIME;
806 		if (di_flags & XFS_DIFLAG_NODUMP)
807 			flags |= XFS_XFLAG_NODUMP;
808 		if (di_flags & XFS_DIFLAG_RTINHERIT)
809 			flags |= XFS_XFLAG_RTINHERIT;
810 		if (di_flags & XFS_DIFLAG_PROJINHERIT)
811 			flags |= XFS_XFLAG_PROJINHERIT;
812 		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
813 			flags |= XFS_XFLAG_NOSYMLINKS;
814 		if (di_flags & XFS_DIFLAG_EXTSIZE)
815 			flags |= XFS_XFLAG_EXTSIZE;
816 		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
817 			flags |= XFS_XFLAG_EXTSZINHERIT;
818 		if (di_flags & XFS_DIFLAG_NODEFRAG)
819 			flags |= XFS_XFLAG_NODEFRAG;
820 	}
821 
822 	return flags;
823 }
824 
825 uint
826 xfs_ip2xflags(
827 	xfs_inode_t		*ip)
828 {
829 	xfs_dinode_core_t	*dic = &ip->i_d;
830 
831 	return _xfs_dic2xflags(dic->di_flags) |
832 				(XFS_CFORK_Q(dic) ? XFS_XFLAG_HASATTR : 0);
833 }
834 
835 uint
836 xfs_dic2xflags(
837 	xfs_dinode_core_t	*dic)
838 {
839 	return _xfs_dic2xflags(INT_GET(dic->di_flags, ARCH_CONVERT)) |
840 				(XFS_CFORK_Q_DISK(dic) ? XFS_XFLAG_HASATTR : 0);
841 }
842 
843 /*
844  * Given a mount structure and an inode number, return a pointer
845  * to a newly allocated in-core inode corresponding to the given
846  * inode number.
847  *
848  * Initialize the inode's attributes and extent pointers if it
849  * already has them (it will not if the inode has no links).
850  */
851 int
852 xfs_iread(
853 	xfs_mount_t	*mp,
854 	xfs_trans_t	*tp,
855 	xfs_ino_t	ino,
856 	xfs_inode_t	**ipp,
857 	xfs_daddr_t	bno)
858 {
859 	xfs_buf_t	*bp;
860 	xfs_dinode_t	*dip;
861 	xfs_inode_t	*ip;
862 	int		error;
863 
864 	ASSERT(xfs_inode_zone != NULL);
865 
866 	ip = kmem_zone_zalloc(xfs_inode_zone, KM_SLEEP);
867 	ip->i_ino = ino;
868 	ip->i_mount = mp;
869 
870 	/*
871 	 * Get pointers to the on-disk inode and the buffer containing it.
872 	 * If the inode number refers to a block outside the file system
873 	 * then xfs_itobp() will return an error.  In this case we should
874 	 * return that error as well.  Set i_blkno to 0 so that xfs_itobp() will
875 	 * know that this is a new incore inode.
876 	 */
877 	error = xfs_itobp(mp, tp, ip, &dip, &bp, bno, 0);
878 	if (error) {
879 		kmem_zone_free(xfs_inode_zone, ip);
880 		return error;
881 	}
882 
883 	/*
884 	 * Initialize inode's trace buffers.
885 	 * Do this before xfs_iformat in case it adds entries.
886 	 */
887 #ifdef XFS_BMAP_TRACE
888 	ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_SLEEP);
889 #endif
890 #ifdef XFS_BMBT_TRACE
891 	ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_SLEEP);
892 #endif
893 #ifdef XFS_RW_TRACE
894 	ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_SLEEP);
895 #endif
896 #ifdef XFS_ILOCK_TRACE
897 	ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_SLEEP);
898 #endif
899 #ifdef XFS_DIR2_TRACE
900 	ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_SLEEP);
901 #endif
902 
903 	/*
904 	 * If we got something that isn't an inode it means someone
905 	 * (nfs or dmi) has a stale handle.
906 	 */
907 	if (INT_GET(dip->di_core.di_magic, ARCH_CONVERT) != XFS_DINODE_MAGIC) {
908 		kmem_zone_free(xfs_inode_zone, ip);
909 		xfs_trans_brelse(tp, bp);
910 #ifdef DEBUG
911 		xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: "
912 				"dip->di_core.di_magic (0x%x) != "
913 				"XFS_DINODE_MAGIC (0x%x)",
914 				INT_GET(dip->di_core.di_magic, ARCH_CONVERT),
915 				XFS_DINODE_MAGIC);
916 #endif /* DEBUG */
917 		return XFS_ERROR(EINVAL);
918 	}
919 
920 	/*
921 	 * If the on-disk inode is already linked to a directory
922 	 * entry, copy all of the inode into the in-core inode.
923 	 * xfs_iformat() handles copying in the inode format
924 	 * specific information.
925 	 * Otherwise, just get the truly permanent information.
926 	 */
927 	if (dip->di_core.di_mode) {
928 		xfs_xlate_dinode_core((xfs_caddr_t)&dip->di_core,
929 		     &(ip->i_d), 1);
930 		error = xfs_iformat(ip, dip);
931 		if (error)  {
932 			kmem_zone_free(xfs_inode_zone, ip);
933 			xfs_trans_brelse(tp, bp);
934 #ifdef DEBUG
935 			xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: "
936 					"xfs_iformat() returned error %d",
937 					error);
938 #endif /* DEBUG */
939 			return error;
940 		}
941 	} else {
942 		ip->i_d.di_magic = INT_GET(dip->di_core.di_magic, ARCH_CONVERT);
943 		ip->i_d.di_version = INT_GET(dip->di_core.di_version, ARCH_CONVERT);
944 		ip->i_d.di_gen = INT_GET(dip->di_core.di_gen, ARCH_CONVERT);
945 		ip->i_d.di_flushiter = INT_GET(dip->di_core.di_flushiter, ARCH_CONVERT);
946 		/*
947 		 * Make sure to pull in the mode here as well in
948 		 * case the inode is released without being used.
949 		 * This ensures that xfs_inactive() will see that
950 		 * the inode is already free and not try to mess
951 		 * with the uninitialized part of it.
952 		 */
953 		ip->i_d.di_mode = 0;
954 		/*
955 		 * Initialize the per-fork minima and maxima for a new
956 		 * inode here.  xfs_iformat will do it for old inodes.
957 		 */
958 		ip->i_df.if_ext_max =
959 			XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
960 	}
961 
962 	INIT_LIST_HEAD(&ip->i_reclaim);
963 
964 	/*
965 	 * The inode format changed when we moved the link count and
966 	 * made it 32 bits long.  If this is an old format inode,
967 	 * convert it in memory to look like a new one.  If it gets
968 	 * flushed to disk we will convert back before flushing or
969 	 * logging it.  We zero out the new projid field and the old link
970 	 * count field.  We'll handle clearing the pad field (the remains
971 	 * of the old uuid field) when we actually convert the inode to
972 	 * the new format. We don't change the version number so that we
973 	 * can distinguish this from a real new format inode.
974 	 */
975 	if (ip->i_d.di_version == XFS_DINODE_VERSION_1) {
976 		ip->i_d.di_nlink = ip->i_d.di_onlink;
977 		ip->i_d.di_onlink = 0;
978 		ip->i_d.di_projid = 0;
979 	}
980 
981 	ip->i_delayed_blks = 0;
982 
983 	/*
984 	 * Mark the buffer containing the inode as something to keep
985 	 * around for a while.  This helps to keep recently accessed
986 	 * meta-data in-core longer.
987 	 */
988 	 XFS_BUF_SET_REF(bp, XFS_INO_REF);
989 
990 	/*
991 	 * Use xfs_trans_brelse() to release the buffer containing the
992 	 * on-disk inode, because it was acquired with xfs_trans_read_buf()
993 	 * in xfs_itobp() above.  If tp is NULL, this is just a normal
994 	 * brelse().  If we're within a transaction, then xfs_trans_brelse()
995 	 * will only release the buffer if it is not dirty within the
996 	 * transaction.  It will be OK to release the buffer in this case,
997 	 * because inodes on disk are never destroyed and we will be
998 	 * locking the new in-core inode before putting it in the hash
999 	 * table where other processes can find it.  Thus we don't have
1000 	 * to worry about the inode being changed just because we released
1001 	 * the buffer.
1002 	 */
1003 	xfs_trans_brelse(tp, bp);
1004 	*ipp = ip;
1005 	return 0;
1006 }
1007 
1008 /*
1009  * Read in extents from a btree-format inode.
1010  * Allocate and fill in if_extents.  Real work is done in xfs_bmap.c.
1011  */
1012 int
1013 xfs_iread_extents(
1014 	xfs_trans_t	*tp,
1015 	xfs_inode_t	*ip,
1016 	int		whichfork)
1017 {
1018 	int		error;
1019 	xfs_ifork_t	*ifp;
1020 	xfs_extnum_t	nextents;
1021 	size_t		size;
1022 
1023 	if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
1024 		XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW,
1025 				 ip->i_mount);
1026 		return XFS_ERROR(EFSCORRUPTED);
1027 	}
1028 	nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
1029 	size = nextents * sizeof(xfs_bmbt_rec_t);
1030 	ifp = XFS_IFORK_PTR(ip, whichfork);
1031 
1032 	/*
1033 	 * We know that the size is valid (it's checked in iformat_btree)
1034 	 */
1035 	ifp->if_lastex = NULLEXTNUM;
1036 	ifp->if_bytes = ifp->if_real_bytes = 0;
1037 	ifp->if_flags |= XFS_IFEXTENTS;
1038 	xfs_iext_add(ifp, 0, nextents);
1039 	error = xfs_bmap_read_extents(tp, ip, whichfork);
1040 	if (error) {
1041 		xfs_iext_destroy(ifp);
1042 		ifp->if_flags &= ~XFS_IFEXTENTS;
1043 		return error;
1044 	}
1045 	xfs_validate_extents(ifp, nextents, 0, XFS_EXTFMT_INODE(ip));
1046 	return 0;
1047 }
1048 
1049 /*
1050  * Allocate an inode on disk and return a copy of its in-core version.
1051  * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
1052  * appropriately within the inode.  The uid and gid for the inode are
1053  * set according to the contents of the given cred structure.
1054  *
1055  * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
1056  * has a free inode available, call xfs_iget()
1057  * to obtain the in-core version of the allocated inode.  Finally,
1058  * fill in the inode and log its initial contents.  In this case,
1059  * ialloc_context would be set to NULL and call_again set to false.
1060  *
1061  * If xfs_dialloc() does not have an available inode,
1062  * it will replenish its supply by doing an allocation. Since we can
1063  * only do one allocation within a transaction without deadlocks, we
1064  * must commit the current transaction before returning the inode itself.
1065  * In this case, therefore, we will set call_again to true and return.
1066  * The caller should then commit the current transaction, start a new
1067  * transaction, and call xfs_ialloc() again to actually get the inode.
1068  *
1069  * To ensure that some other process does not grab the inode that
1070  * was allocated during the first call to xfs_ialloc(), this routine
1071  * also returns the [locked] bp pointing to the head of the freelist
1072  * as ialloc_context.  The caller should hold this buffer across
1073  * the commit and pass it back into this routine on the second call.
1074  */
1075 int
1076 xfs_ialloc(
1077 	xfs_trans_t	*tp,
1078 	xfs_inode_t	*pip,
1079 	mode_t		mode,
1080 	xfs_nlink_t	nlink,
1081 	xfs_dev_t	rdev,
1082 	cred_t		*cr,
1083 	xfs_prid_t	prid,
1084 	int		okalloc,
1085 	xfs_buf_t	**ialloc_context,
1086 	boolean_t	*call_again,
1087 	xfs_inode_t	**ipp)
1088 {
1089 	xfs_ino_t	ino;
1090 	xfs_inode_t	*ip;
1091 	bhv_vnode_t	*vp;
1092 	uint		flags;
1093 	int		error;
1094 
1095 	/*
1096 	 * Call the space management code to pick
1097 	 * the on-disk inode to be allocated.
1098 	 */
1099 	error = xfs_dialloc(tp, pip->i_ino, mode, okalloc,
1100 			    ialloc_context, call_again, &ino);
1101 	if (error != 0) {
1102 		return error;
1103 	}
1104 	if (*call_again || ino == NULLFSINO) {
1105 		*ipp = NULL;
1106 		return 0;
1107 	}
1108 	ASSERT(*ialloc_context == NULL);
1109 
1110 	/*
1111 	 * Get the in-core inode with the lock held exclusively.
1112 	 * This is because we're setting fields here we need
1113 	 * to prevent others from looking at until we're done.
1114 	 */
1115 	error = xfs_trans_iget(tp->t_mountp, tp, ino,
1116 			IGET_CREATE, XFS_ILOCK_EXCL, &ip);
1117 	if (error != 0) {
1118 		return error;
1119 	}
1120 	ASSERT(ip != NULL);
1121 
1122 	vp = XFS_ITOV(ip);
1123 	ip->i_d.di_mode = (__uint16_t)mode;
1124 	ip->i_d.di_onlink = 0;
1125 	ip->i_d.di_nlink = nlink;
1126 	ASSERT(ip->i_d.di_nlink == nlink);
1127 	ip->i_d.di_uid = current_fsuid(cr);
1128 	ip->i_d.di_gid = current_fsgid(cr);
1129 	ip->i_d.di_projid = prid;
1130 	memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
1131 
1132 	/*
1133 	 * If the superblock version is up to where we support new format
1134 	 * inodes and this is currently an old format inode, then change
1135 	 * the inode version number now.  This way we only do the conversion
1136 	 * here rather than here and in the flush/logging code.
1137 	 */
1138 	if (XFS_SB_VERSION_HASNLINK(&tp->t_mountp->m_sb) &&
1139 	    ip->i_d.di_version == XFS_DINODE_VERSION_1) {
1140 		ip->i_d.di_version = XFS_DINODE_VERSION_2;
1141 		/*
1142 		 * We've already zeroed the old link count, the projid field,
1143 		 * and the pad field.
1144 		 */
1145 	}
1146 
1147 	/*
1148 	 * Project ids won't be stored on disk if we are using a version 1 inode.
1149 	 */
1150 	if ( (prid != 0) && (ip->i_d.di_version == XFS_DINODE_VERSION_1))
1151 		xfs_bump_ino_vers2(tp, ip);
1152 
1153 	if (XFS_INHERIT_GID(pip, vp->v_vfsp)) {
1154 		ip->i_d.di_gid = pip->i_d.di_gid;
1155 		if ((pip->i_d.di_mode & S_ISGID) && (mode & S_IFMT) == S_IFDIR) {
1156 			ip->i_d.di_mode |= S_ISGID;
1157 		}
1158 	}
1159 
1160 	/*
1161 	 * If the group ID of the new file does not match the effective group
1162 	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
1163 	 * (and only if the irix_sgid_inherit compatibility variable is set).
1164 	 */
1165 	if ((irix_sgid_inherit) &&
1166 	    (ip->i_d.di_mode & S_ISGID) &&
1167 	    (!in_group_p((gid_t)ip->i_d.di_gid))) {
1168 		ip->i_d.di_mode &= ~S_ISGID;
1169 	}
1170 
1171 	ip->i_d.di_size = 0;
1172 	ip->i_d.di_nextents = 0;
1173 	ASSERT(ip->i_d.di_nblocks == 0);
1174 	xfs_ichgtime(ip, XFS_ICHGTIME_CHG|XFS_ICHGTIME_ACC|XFS_ICHGTIME_MOD);
1175 	/*
1176 	 * di_gen will have been taken care of in xfs_iread.
1177 	 */
1178 	ip->i_d.di_extsize = 0;
1179 	ip->i_d.di_dmevmask = 0;
1180 	ip->i_d.di_dmstate = 0;
1181 	ip->i_d.di_flags = 0;
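	/*
	 * flags accumulates which parts of the inode must be logged below:
	 * every new inode logs the core, and device special files also log
	 * the rdev field (XFS_ILOG_DEV).
	 */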
1182 	flags = XFS_ILOG_CORE;
1183 	switch (mode & S_IFMT) {
1184 	case S_IFIFO:
1185 	case S_IFCHR:
1186 	case S_IFBLK:
1187 	case S_IFSOCK:
1188 		ip->i_d.di_format = XFS_DINODE_FMT_DEV;
1189 		ip->i_df.if_u2.if_rdev = rdev;
1190 		ip->i_df.if_flags = 0;
1191 		flags |= XFS_ILOG_DEV;
1192 		break;
1193 	case S_IFREG:
1194 	case S_IFDIR:
1195 		if (unlikely(pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
1196 			uint	di_flags = 0;
1197 
1198 			if ((mode & S_IFMT) == S_IFDIR) {
1199 				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
1200 					di_flags |= XFS_DIFLAG_RTINHERIT;
1201 				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
1202 					di_flags |= XFS_DIFLAG_EXTSZINHERIT;
1203 					ip->i_d.di_extsize = pip->i_d.di_extsize;
1204 				}
1205 			} else if ((mode & S_IFMT) == S_IFREG) {
1206 				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) {
1207 					di_flags |= XFS_DIFLAG_REALTIME;
1208 					ip->i_iocore.io_flags |= XFS_IOCORE_RT;
1209 				}
1210 				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
1211 					di_flags |= XFS_DIFLAG_EXTSIZE;
1212 					ip->i_d.di_extsize = pip->i_d.di_extsize;
1213 				}
1214 			}
1215 			if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
1216 			    xfs_inherit_noatime)
1217 				di_flags |= XFS_DIFLAG_NOATIME;
1218 			if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
1219 			    xfs_inherit_nodump)
1220 				di_flags |= XFS_DIFLAG_NODUMP;
1221 			if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
1222 			    xfs_inherit_sync)
1223 				di_flags |= XFS_DIFLAG_SYNC;
1224 			if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
1225 			    xfs_inherit_nosymlinks)
1226 				di_flags |= XFS_DIFLAG_NOSYMLINKS;
1227 			if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
1228 				di_flags |= XFS_DIFLAG_PROJINHERIT;
1229 			if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
1230 			    xfs_inherit_nodefrag)
1231 				di_flags |= XFS_DIFLAG_NODEFRAG;
1232 			ip->i_d.di_flags |= di_flags;
1233 		}
1234 		/* FALLTHROUGH */
1235 	case S_IFLNK:
1236 		ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
1237 		ip->i_df.if_flags = XFS_IFEXTENTS;
1238 		ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
1239 		ip->i_df.if_u1.if_extents = NULL;
1240 		break;
1241 	default:
1242 		ASSERT(0);
1243 	}
1244 	/*
1245 	 * Attribute fork settings for new inode.
1246 	 */
1247 	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
1248 	ip->i_d.di_anextents = 0;
1249 
1250 	/*
1251 	 * Log the new values stuffed into the inode.
1252 	 */
1253 	xfs_trans_log_inode(tp, ip, flags);
1254 
1255 	/* now that we have an i_mode we can setup inode ops and unlock */
1256 	bhv_vfs_init_vnode(XFS_MTOVFS(tp->t_mountp), vp, XFS_ITOBHV(ip), 1);
1257 
1258 	*ipp = ip;
1259 	return 0;
1260 }
1261 
1262 /*
1263  * Check to make sure that there are no blocks allocated to the
1264  * file beyond the size of the file.  We don't check this for
1265  * files with fixed size extents or real time extents, but we
1266  * at least do it for regular files.
1267  */
1268 #ifdef DEBUG
1269 void
1270 xfs_isize_check(
1271 	xfs_mount_t	*mp,
1272 	xfs_inode_t	*ip,
1273 	xfs_fsize_t	isize)
1274 {
1275 	xfs_fileoff_t	map_first;
1276 	int		nimaps;
1277 	xfs_bmbt_irec_t	imaps[2];
1278 
1279 	if ((ip->i_d.di_mode & S_IFMT) != S_IFREG)
1280 		return;
1281 
1282 	if (ip->i_d.di_flags & (XFS_DIFLAG_REALTIME | XFS_DIFLAG_EXTSIZE))
1283 		return;
1284 
1285 	nimaps = 2;
1286 	map_first = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
1287 	/*
1288 	 * The filesystem could be shutting down, so bmapi may return
1289 	 * an error.
1290 	 */
1291 	if (xfs_bmapi(NULL, ip, map_first,
1292 			 (XFS_B_TO_FSB(mp,
1293 				       (xfs_ufsize_t)XFS_MAXIOFFSET(mp)) -
1294 			  map_first),
1295 			 XFS_BMAPI_ENTIRE, NULL, 0, imaps, &nimaps,
1296 			 NULL, NULL))
1297 	    return;
1298 	ASSERT(nimaps == 1);
1299 	ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK);
1300 }
1301 #endif	/* DEBUG */
1302 
1303 /*
1304  * Calculate the last possible buffered byte in a file.  This must
1305  * include data that was buffered beyond the EOF by the write code.
1306  * This also needs to deal with overflowing the xfs_fsize_t type
1307  * which can happen for sizes near the limit.
1308  *
1309  * We also need to take into account any blocks beyond the EOF.  It
1310  * may be the case that they were buffered by a write which failed.
1311  * In that case the pages will still be in memory, but the inode size
1312  * will never have been updated.
1313  */
1314 xfs_fsize_t
1315 xfs_file_last_byte(
1316 	xfs_inode_t	*ip)
1317 {
1318 	xfs_mount_t	*mp;
1319 	xfs_fsize_t	last_byte;
1320 	xfs_fileoff_t	last_block;
1321 	xfs_fileoff_t	size_last_block;
1322 	int		error;
1323 
1324 	ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE | MR_ACCESS));
1325 
1326 	mp = ip->i_mount;
1327 	/*
1328 	 * Only check for blocks beyond the EOF if the extents have
1329 	 * been read in.  This eliminates the need for the inode lock,
1330 	 * and it also saves us from looking when it really isn't
1331 	 * necessary.
1332 	 */
1333 	if (ip->i_df.if_flags & XFS_IFEXTENTS) {
1334 		error = xfs_bmap_last_offset(NULL, ip, &last_block,
1335 			XFS_DATA_FORK);
1336 		if (error) {
1337 			last_block = 0;
1338 		}
1339 	} else {
1340 		last_block = 0;
1341 	}
1342 	size_last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)ip->i_d.di_size);
1343 	last_block = XFS_FILEOFF_MAX(last_block, size_last_block);
1344 
1345 	last_byte = XFS_FSB_TO_B(mp, last_block);
1346 	if (last_byte < 0) {
1347 		return XFS_MAXIOFFSET(mp);
1348 	}
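	/*
	 * Add one write-I/O buffer size (e.g. 64 KiB when m_writeio_log is
	 * 16) so that pages buffered just beyond the last block, as
	 * described above, are covered too.
	 */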
1349 	last_byte += (1 << mp->m_writeio_log);
1350 	if (last_byte < 0) {
1351 		return XFS_MAXIOFFSET(mp);
1352 	}
1353 	return last_byte;
1354 }
1355 
1356 #if defined(XFS_RW_TRACE)
1357 STATIC void
1358 xfs_itrunc_trace(
1359 	int		tag,
1360 	xfs_inode_t	*ip,
1361 	int		flag,
1362 	xfs_fsize_t	new_size,
1363 	xfs_off_t	toss_start,
1364 	xfs_off_t	toss_finish)
1365 {
1366 	if (ip->i_rwtrace == NULL) {
1367 		return;
1368 	}
1369 
1370 	ktrace_enter(ip->i_rwtrace,
1371 		     (void*)((long)tag),
1372 		     (void*)ip,
1373 		     (void*)(unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff),
1374 		     (void*)(unsigned long)(ip->i_d.di_size & 0xffffffff),
1375 		     (void*)((long)flag),
1376 		     (void*)(unsigned long)((new_size >> 32) & 0xffffffff),
1377 		     (void*)(unsigned long)(new_size & 0xffffffff),
1378 		     (void*)(unsigned long)((toss_start >> 32) & 0xffffffff),
1379 		     (void*)(unsigned long)(toss_start & 0xffffffff),
1380 		     (void*)(unsigned long)((toss_finish >> 32) & 0xffffffff),
1381 		     (void*)(unsigned long)(toss_finish & 0xffffffff),
1382 		     (void*)(unsigned long)current_cpu(),
1383 		     (void*)(unsigned long)current_pid(),
1384 		     (void*)NULL,
1385 		     (void*)NULL,
1386 		     (void*)NULL);
1387 }
1388 #else
1389 #define	xfs_itrunc_trace(tag, ip, flag, new_size, toss_start, toss_finish)
1390 #endif
1391 
1392 /*
1393  * Start the truncation of the file to new_size.  The new size
1394  * must be smaller than the current size.  This routine will
1395  * clear the buffer and page caches of file data in the removed
1396  * range, and xfs_itruncate_finish() will remove the underlying
1397  * disk blocks.
1398  *
1399  * The inode must have its I/O lock locked EXCLUSIVELY, and it
1400  * must NOT have the inode lock held at all.  This is because we're
1401  * calling into the buffer/page cache code and we can't hold the
1402  * inode lock when we do so.
1403  *
1404  * We need to wait for any direct I/Os in flight to complete before we
1405  * proceed with the truncate. This is needed to prevent the extents
1406  * being read or written by the direct I/Os from being removed while the
1407  * I/O is in flight as there is no other method of synchronising
1408  * direct I/O with the truncate operation.  Also, because we hold
1409  * the IOLOCK in exclusive mode, we prevent new direct I/Os from being
1410  * started until the truncate completes and drops the lock. Essentially,
1411  * the vn_iowait() call forms an I/O barrier that provides strict ordering
1412  * between direct I/Os and the truncate operation.
1413  *
1414  * The flags parameter can have either the value XFS_ITRUNC_DEFINITE
1415  * or XFS_ITRUNC_MAYBE.  The XFS_ITRUNC_MAYBE value should be used
1416  * in the case that the caller is locking things out of order and
1417  * may not be able to call xfs_itruncate_finish() with the inode lock
1418  * held without dropping the I/O lock.  If the caller must drop the
1419  * I/O lock before calling xfs_itruncate_finish(), then xfs_itruncate_start()
1420  * must be called again with all the same restrictions as the initial
1421  * call.
1422  */
1423 void
1424 xfs_itruncate_start(
1425 	xfs_inode_t	*ip,
1426 	uint		flags,
1427 	xfs_fsize_t	new_size)
1428 {
1429 	xfs_fsize_t	last_byte;
1430 	xfs_off_t	toss_start;
1431 	xfs_mount_t	*mp;
1432 	bhv_vnode_t	*vp;
1433 
1434 	ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE) != 0);
1435 	ASSERT((new_size == 0) || (new_size <= ip->i_d.di_size));
1436 	ASSERT((flags == XFS_ITRUNC_DEFINITE) ||
1437 	       (flags == XFS_ITRUNC_MAYBE));
1438 
1439 	mp = ip->i_mount;
1440 	vp = XFS_ITOV(ip);
1441 
1442 	vn_iowait(vp);  /* wait for the completion of any pending DIOs */
1443 
1444 	/*
1445 	 * Call toss_pages or flushinval_pages to get rid of pages
1446 	 * overlapping the region being removed.  We have to use
1447 	 * the less efficient flushinval_pages in the case that the
1448 	 * caller may not be able to finish the truncate without
1449 	 * dropping the inode's I/O lock.  Make sure
1450 	 * to catch any pages brought in by buffers overlapping
1451 	 * the EOF by searching out beyond the isize by our
1452 	 * block size. We round new_size up to a block boundary
1453 	 * so that we don't toss things on the same block as
1454 	 * new_size but before it.
1455 	 *
1456 	 * Before calling toss_pages or flushinval_pages, make sure to
1457 	 * call remapf() over the same region if the file is mapped.
1458 	 * This frees up mapped file references to the pages in the
1459 	 * given range and for the flushinval_pages case it ensures
1460 	 * that we get the latest mapped changes flushed out.
1461 	 */
1462 	toss_start = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
1463 	toss_start = XFS_FSB_TO_B(mp, toss_start);
1464 	if (toss_start < 0) {
1465 		/*
1466 		 * The place to start tossing is beyond our maximum
1467 		 * file size, so there is no way that the data extended
1468 		 * out there.
1469 		 */
1470 		return;
1471 	}
1472 	last_byte = xfs_file_last_byte(ip);
1473 	xfs_itrunc_trace(XFS_ITRUNC_START, ip, flags, new_size, toss_start,
1474 			 last_byte);
1475 	if (last_byte > toss_start) {
1476 		if (flags & XFS_ITRUNC_DEFINITE) {
1477 			bhv_vop_toss_pages(vp, toss_start, -1, FI_REMAPF_LOCKED);
1478 		} else {
1479 			bhv_vop_flushinval_pages(vp, toss_start, -1, FI_REMAPF_LOCKED);
1480 		}
1481 	}
1482 
1483 #ifdef DEBUG
1484 	if (new_size == 0) {
1485 		ASSERT(VN_CACHED(vp) == 0);
1486 	}
1487 #endif
1488 }
1489 
1490 /*
1491  * Shrink the file to the given new_size.  The new
1492  * size must be smaller than the current size.
1493  * This will free up the underlying blocks
1494  * in the removed range after a call to xfs_itruncate_start()
1495  * or xfs_atruncate_start().
1496  *
1497  * The transaction passed to this routine must have made
1498  * a permanent log reservation of at least XFS_ITRUNCATE_LOG_RES.
1499  * This routine may commit the given transaction and
1500  * start new ones, so make sure everything involved in
1501  * the transaction is tidy before calling here.
1502  * Some transaction will be returned to the caller to be
1503  * committed.  The incoming transaction must already include
1504  * the inode, and both inode locks must be held exclusively.
1505  * The inode must also be "held" within the transaction.  On
1506  * return the inode will be "held" within the returned transaction.
1507  * This routine does NOT require any disk space to be reserved
1508  * for it within the transaction.
1509  *
1510  * The fork parameter must be either xfs_attr_fork or xfs_data_fork,
1511  * and it indicates the fork which is to be truncated.  For the
1512  * attribute fork we only support truncation to size 0.
1513  *
1514  * We use the sync parameter to indicate whether or not the first
1515  * transaction we perform might have to be synchronous.  For the attr fork,
1516  * it needs to be so if the unlink of the inode is not yet known to be
1517  * permanent in the log.  This keeps us from freeing and reusing the
1518  * blocks of the attribute fork before the unlink of the inode becomes
1519  * permanent.
1520  *
1521  * For the data fork, we normally have to run synchronously if we're
1522  * being called out of the inactive path or we're being called
1523  * out of the create path where we're truncating an existing file.
1524  * Either way, the truncate needs to be sync so blocks don't reappear
1525  * in the file with altered data in case of a crash.  wsync filesystems
1526  * can run the first case async because anything that shrinks the inode
1527  * has to run sync so by the time we're called here from inactive, the
1528  * inode size is permanently set to 0.
1529  *
1530  * Calls from the truncate path always need to be sync unless we're
1531  * in a wsync filesystem and the file has already been unlinked.
1532  *
1533  * The caller is responsible for correctly setting the sync parameter.
1534  * It gets too hard for us to guess here which path we're being called
1535  * out of just based on inode state.
1536  */
1537 int
1538 xfs_itruncate_finish(
1539 	xfs_trans_t	**tp,
1540 	xfs_inode_t	*ip,
1541 	xfs_fsize_t	new_size,
1542 	int		fork,
1543 	int		sync)
1544 {
1545 	xfs_fsblock_t	first_block;
1546 	xfs_fileoff_t	first_unmap_block;
1547 	xfs_fileoff_t	last_block;
1548 	xfs_filblks_t	unmap_len=0;
1549 	xfs_mount_t	*mp;
1550 	xfs_trans_t	*ntp;
1551 	int		done;
1552 	int		committed;
1553 	xfs_bmap_free_t	free_list;
1554 	int		error;
1555 
1556 	ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE) != 0);
1557 	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);
1558 	ASSERT((new_size == 0) || (new_size <= ip->i_d.di_size));
1559 	ASSERT(*tp != NULL);
1560 	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
1561 	ASSERT(ip->i_transp == *tp);
1562 	ASSERT(ip->i_itemp != NULL);
1563 	ASSERT(ip->i_itemp->ili_flags & XFS_ILI_HOLD);
1564 
1565 
1566 	ntp = *tp;
1567 	mp = (ntp)->t_mountp;
1568 	ASSERT(! XFS_NOT_DQATTACHED(mp, ip));
1569 
1570 	/*
1571 	 * We only support truncating the entire attribute fork.
1572 	 */
1573 	if (fork == XFS_ATTR_FORK) {
1574 		new_size = 0LL;
1575 	}
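	/*
	 * XFS_B_TO_FSB rounds new_size up to a filesystem block boundary,
	 * so first_unmap_block is the first block that contains no data
	 * below the new EOF.
	 */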
1576 	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
1577 	xfs_itrunc_trace(XFS_ITRUNC_FINISH1, ip, 0, new_size, 0, 0);
1578 	/*
1579 	 * The first thing we do is set the size to new_size permanently
1580 	 * on disk.  This way we don't have to worry about anyone ever
1581 	 * being able to look at the data being freed even in the face
1582 	 * of a crash.  What we're getting around here is the case where
1583 	 * we free a block, it is allocated to another file, it is written
1584 	 * to, and then we crash.  If the new data gets written to the
1585 	 * file but the log buffers containing the free and reallocation
1586 	 * don't, then we'd end up with garbage in the blocks being freed.
1587 	 * As long as we make the new_size permanent before actually
1588 	 * freeing any blocks it doesn't matter if they get written to.
1589 	 *
1590 	 * The callers must signal into us whether or not the size
1591 	 * setting here must be synchronous.  There are a few cases
1592 	 * where it doesn't have to be synchronous.  Those cases
1593 	 * occur if the file is unlinked and we know the unlink is
1594 	 * permanent or if the blocks being truncated are guaranteed
1595 	 * to be beyond the inode eof (regardless of the link count)
1596 	 * and the eof value is permanent.  Both of these cases occur
1597 	 * only on wsync-mounted filesystems.  In those cases, we're
1598 	 * guaranteed that no user will ever see the data in the blocks
1599 	 * that are being truncated so the truncate can run async.
1600 	 * In the free beyond eof case, the file may wind up with
1601 	 * more blocks allocated to it than it needs if we crash
1602 	 * and that won't get fixed until the next time the file
1603 	 * is re-opened and closed but that's ok as that shouldn't
1604 	 * be too many blocks.
1605 	 *
1606 	 * However, we can't just make all wsync xactions run async
1607 	 * because there's one call out of the create path that needs
1608 	 * to run sync where it's truncating an existing file to size
1609 	 * 0 whose size is > 0.
1610 	 *
1611 	 * It's probably possible to come up with a test in this
1612 	 * routine that would correctly distinguish all the above
1613 	 * cases from the values of the function parameters and the
1614 	 * inode state but for sanity's sake, I've decided to let the
1615 	 * layers above just tell us.  It's simpler to correctly figure
1616 	 * out in the layer above exactly under what conditions we
1617 	 * can run async and I think it's easier for others to read and
1618 	 * follow the logic in case something has to be changed.
1619 	 * cscope is your friend -- rcc.
1620 	 *
1621 	 * The attribute fork is much simpler.
1622 	 *
1623 	 * For the attribute fork we allow the caller to tell us whether
1624 	 * the unlink of the inode that led to this call is yet permanent
1625 	 * in the on disk log.  If it is not and we will be freeing extents
1626 	 * in this inode then we make the first transaction synchronous
1627 	 * to make sure that the unlink is permanent by the time we free
1628 	 * the blocks.
1629 	 */
1630 	if (fork == XFS_DATA_FORK) {
1631 		if (ip->i_d.di_nextents > 0) {
1632 			ip->i_d.di_size = new_size;
1633 			xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
1634 		}
1635 	} else if (sync) {
1636 		ASSERT(!(mp->m_flags & XFS_MOUNT_WSYNC));
1637 		if (ip->i_d.di_anextents > 0)
1638 			xfs_trans_set_sync(ntp);
1639 	}
1640 	ASSERT(fork == XFS_DATA_FORK ||
1641 		(fork == XFS_ATTR_FORK &&
1642 			((sync && !(mp->m_flags & XFS_MOUNT_WSYNC)) ||
1643 			 (sync == 0 && (mp->m_flags & XFS_MOUNT_WSYNC)))));
1644 
1645 	/*
1646 	 * Since it is possible for space to become allocated beyond
1647 	 * the end of the file (in a crash where the space is allocated
1648 	 * but the inode size is not yet updated), simply remove any
1649 	 * blocks which show up between the new EOF and the maximum
1650 	 * possible file size.  If the first block to be removed is
1651 	 * beyond the maximum file size (ie it is the same as last_block),
1652 	 * then there is nothing to do.
1653 	 */
1654 	last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
1655 	ASSERT(first_unmap_block <= last_block);
1656 	done = 0;
1657 	if (last_block == first_unmap_block) {
1658 		done = 1;
1659 	} else {
1660 		unmap_len = last_block - first_unmap_block + 1;
1661 	}
1662 	while (!done) {
1663 		/*
1664 		 * Free up to XFS_ITRUNC_MAX_EXTENTS extents.  xfs_bunmapi()
1665 		 * will tell us whether it freed the entire range or
1666 		 * not.  If this is a synchronous mount (wsync),
1667 		 * then we can tell bunmapi to keep all the
1668 		 * transactions asynchronous since the unlink
1669 		 * transaction that made this inode inactive has
1670 		 * already hit the disk.  There's no danger of
1671 		 * the freed blocks being reused, there being a
1672 		 * crash, and the reused blocks suddenly reappearing
1673 		 * in this file with garbage in them once recovery
1674 		 * runs.
1675 		 */
1676 		XFS_BMAP_INIT(&free_list, &first_block);
1677 		error = XFS_BUNMAPI(mp, ntp, &ip->i_iocore,
1678 				    first_unmap_block, unmap_len,
1679 				    XFS_BMAPI_AFLAG(fork) |
1680 				      (sync ? 0 : XFS_BMAPI_ASYNC),
1681 				    XFS_ITRUNC_MAX_EXTENTS,
1682 				    &first_block, &free_list,
1683 				    NULL, &done);
1684 		if (error) {
1685 			/*
1686 			 * If the bunmapi call encounters an error,
1687 			 * return to the caller where the transaction
1688 			 * can be properly aborted.  We just need to
1689 			 * make sure we're not holding any resources
1690 			 * that we were not when we came in.
1691 			 */
1692 			xfs_bmap_cancel(&free_list);
1693 			return error;
1694 		}
1695 
1696 		/*
1697 		 * Duplicate the transaction that has the permanent
1698 		 * reservation and commit the old transaction.
1699 		 */
1700 		error = xfs_bmap_finish(tp, &free_list, first_block,
1701 					&committed);
1702 		ntp = *tp;
1703 		if (error) {
1704 			/*
1705 			 * If the bmap finish call encounters an error,
1706 			 * return to the caller where the transaction
1707 			 * can be properly aborted.  We just need to
1708 			 * make sure we're not holding any resources
1709 			 * that we were not when we came in.
1710 			 *
1711 			 * Aborting from this point might lose some
1712 			 * blocks in the file system, but oh well.
1713 			 */
1714 			xfs_bmap_cancel(&free_list);
1715 			if (committed) {
1716 				/*
1717 				 * If the passed in transaction committed
1718 				 * in xfs_bmap_finish(), then we want to
1719 				 * add the inode to this one before returning.
1720 				 * This keeps things simple for the higher
1721 				 * level code, because it always knows that
1722 				 * the inode is locked and held in the
1723 				 * transaction that returns to it whether
1724 				 * errors occur or not.  We don't mark the
1725 				 * inode dirty so that this transaction can
1726 				 * be easily aborted if possible.
1727 				 */
1728 				xfs_trans_ijoin(ntp, ip,
1729 					XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
1730 				xfs_trans_ihold(ntp, ip);
1731 			}
1732 			return error;
1733 		}
1734 
1735 		if (committed) {
1736 			/*
1737 			 * The first xact was committed,
1738 			 * so add the inode to the new one.
1739 			 * Mark it dirty so it will be logged
1740 			 * and moved forward in the log as
1741 			 * part of every commit.
1742 			 */
1743 			xfs_trans_ijoin(ntp, ip,
1744 					XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
1745 			xfs_trans_ihold(ntp, ip);
1746 			xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
1747 		}
1748 		ntp = xfs_trans_dup(ntp);
1749 		(void) xfs_trans_commit(*tp, 0, NULL);
1750 		*tp = ntp;
1751 		error = xfs_trans_reserve(ntp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
1752 					  XFS_TRANS_PERM_LOG_RES,
1753 					  XFS_ITRUNCATE_LOG_COUNT);
1754 		/*
1755 		 * Add the inode being truncated to the next chained
1756 		 * transaction.
1757 		 */
1758 		xfs_trans_ijoin(ntp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
1759 		xfs_trans_ihold(ntp, ip);
1760 		if (error)
1761 			return (error);
1762 	}
1763 	/*
1764 	 * Only update the size in the case of the data fork, but
1765 	 * always re-log the inode so that our permanent transaction
1766 	 * can keep on rolling it forward in the log.
1767 	 */
1768 	if (fork == XFS_DATA_FORK) {
1769 		xfs_isize_check(mp, ip, new_size);
1770 		ip->i_d.di_size = new_size;
1771 	}
1772 	xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
1773 	ASSERT((new_size != 0) ||
1774 	       (fork == XFS_ATTR_FORK) ||
1775 	       (ip->i_delayed_blks == 0));
1776 	ASSERT((new_size != 0) ||
1777 	       (fork == XFS_ATTR_FORK) ||
1778 	       (ip->i_d.di_nextents == 0));
1779 	xfs_itrunc_trace(XFS_ITRUNC_FINISH2, ip, 0, new_size, 0, 0);
1780 	return 0;
1781 }
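/*
 * Illustrative sketch (not part of the original source): a typical caller
 * drives a data-fork truncate in two steps, allocating its own
 * permanent-reservation transaction in between.  The caller-side details
 * below (flags, error handling) are assumptions; only the
 * xfs_itruncate_finish() call itself reflects the interface defined above.
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL);
 *	xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, 0);
 *	tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
 *	error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
 *				  XFS_TRANS_PERM_LOG_RES,
 *				  XFS_ITRUNCATE_LOG_COUNT);
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 *	xfs_trans_ihold(tp, ip);
 *	error = xfs_itruncate_finish(&tp, ip, (xfs_fsize_t)0,
 *				     XFS_DATA_FORK, 0);
 *	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 */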
1782 
1783 
1784 /*
1785  * xfs_igrow_start
1786  *
1787  * Do the first part of growing a file: zero any data in the last
1788  * block that is beyond the old EOF.  We need to do this before
1789  * the inode is joined to the transaction to modify the i_size.
1790  * That way we can drop the inode lock and call into the buffer
1791  * cache to get the buffer mapping the EOF.
1792  */
1793 int
1794 xfs_igrow_start(
1795 	xfs_inode_t	*ip,
1796 	xfs_fsize_t	new_size,
1797 	cred_t		*credp)
1798 {
1799 	int		error;
1800 
1801 	ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0);
1802 	ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0);
1803 	ASSERT(new_size > ip->i_d.di_size);
1804 
1805 	/*
1806 	 * Zero any pages that may have been created by
1807 	 * xfs_write_file() beyond the end of the file
1808 	 * and any blocks between the old and new file sizes.
1809 	 */
1810 	error = xfs_zero_eof(XFS_ITOV(ip), &ip->i_iocore, new_size,
1811 			     ip->i_d.di_size, new_size);
1812 	return error;
1813 }
1814 
1815 /*
1816  * xfs_igrow_finish
1817  *
1818  * This routine is called to extend the size of a file.
1819  * The inode must have both the iolock and the ilock locked
1820  * for update and it must be a part of the current transaction.
1821  * The xfs_igrow_start() function must have been called previously.
1822  * If the change_flag is not zero, the inode change timestamp will
1823  * be updated.
1824  */
1825 void
1826 xfs_igrow_finish(
1827 	xfs_trans_t	*tp,
1828 	xfs_inode_t	*ip,
1829 	xfs_fsize_t	new_size,
1830 	int		change_flag)
1831 {
1832 	ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0);
1833 	ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0);
1834 	ASSERT(ip->i_transp == tp);
1835 	ASSERT(new_size > ip->i_d.di_size);
1836 
1837 	/*
1838 	 * Update the file size.  Update the inode change timestamp
1839 	 * if change_flag set.
1840 	 */
1841 	ip->i_d.di_size = new_size;
1842 	if (change_flag)
1843 		xfs_ichgtime(ip, XFS_ICHGTIME_CHG);
1844 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1845 
1846 }
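/*
 * Illustrative sketch (not part of the original source): xfs_igrow_start()
 * runs before the inode is joined to a transaction, so the ilock can be
 * dropped while the EOF block is zeroed through the buffer cache, and
 * xfs_igrow_finish() then runs with the inode joined and held.  The
 * transaction setup shown here is an assumption about the caller, not code
 * from this file; passing change_flag != 0 also updates the change time.
 *
 *	error = xfs_igrow_start(ip, new_size, credp);
 *	... reserve a transaction and take XFS_ILOCK_EXCL ...
 *	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
 *	xfs_trans_ihold(tp, ip);
 *	xfs_igrow_finish(tp, ip, new_size, 1);
 *	error = xfs_trans_commit(tp, 0, NULL);
 */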
1847 
1848 
1849 /*
1850  * This is called when the inode's link count goes to 0.
1851  * We place the on-disk inode on a list in the AGI.  It
1852  * will be pulled from this list when the inode is freed.
1853  */
1854 int
1855 xfs_iunlink(
1856 	xfs_trans_t	*tp,
1857 	xfs_inode_t	*ip)
1858 {
1859 	xfs_mount_t	*mp;
1860 	xfs_agi_t	*agi;
1861 	xfs_dinode_t	*dip;
1862 	xfs_buf_t	*agibp;
1863 	xfs_buf_t	*ibp;
1864 	xfs_agnumber_t	agno;
1865 	xfs_daddr_t	agdaddr;
1866 	xfs_agino_t	agino;
1867 	short		bucket_index;
1868 	int		offset;
1869 	int		error;
1870 	int		agi_ok;
1871 
1872 	ASSERT(ip->i_d.di_nlink == 0);
1873 	ASSERT(ip->i_d.di_mode != 0);
1874 	ASSERT(ip->i_transp == tp);
1875 
1876 	mp = tp->t_mountp;
1877 
1878 	agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
1879 	agdaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp));
1880 
1881 	/*
1882 	 * Get the agi buffer first.  It ensures lock ordering
1883 	 * on the list.
1884 	 */
1885 	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, agdaddr,
1886 				   XFS_FSS_TO_BB(mp, 1), 0, &agibp);
1887 	if (error) {
1888 		return error;
1889 	}
1890 	/*
1891 	 * Validate the magic number of the agi block.
1892 	 */
1893 	agi = XFS_BUF_TO_AGI(agibp);
1894 	agi_ok =
1895 		be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC &&
1896 		XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum));
1897 	if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK,
1898 			XFS_RANDOM_IUNLINK))) {
1899 		XFS_CORRUPTION_ERROR("xfs_iunlink", XFS_ERRLEVEL_LOW, mp, agi);
1900 		xfs_trans_brelse(tp, agibp);
1901 		return XFS_ERROR(EFSCORRUPTED);
1902 	}
1903 	/*
1904 	 * Get the index into the agi hash table for the
1905 	 * list this inode will go on.
1906 	 */
1907 	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
1908 	ASSERT(agino != 0);
1909 	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
1910 	ASSERT(agi->agi_unlinked[bucket_index]);
1911 	ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);
1912 
1913 	if (be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO) {
1914 		/*
1915 		 * There is already another inode in the bucket we need
1916 		 * to add ourselves to.  Add us at the front of the list.
1917 		 * Here we put the head pointer into our next pointer,
1918 		 * and then we fall through to point the head at us.
1919 		 */
1920 		error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0);
1921 		if (error) {
1922 			return error;
1923 		}
1924 		ASSERT(INT_GET(dip->di_next_unlinked, ARCH_CONVERT) == NULLAGINO);
1925 		ASSERT(dip->di_next_unlinked);
1926 		/* both on-disk, don't endian flip twice */
1927 		dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
1928 		offset = ip->i_boffset +
1929 			offsetof(xfs_dinode_t, di_next_unlinked);
1930 		xfs_trans_inode_buf(tp, ibp);
1931 		xfs_trans_log_buf(tp, ibp, offset,
1932 				  (offset + sizeof(xfs_agino_t) - 1));
1933 		xfs_inobp_check(mp, ibp);
1934 	}
1935 
1936 	/*
1937 	 * Point the bucket head pointer at the inode being inserted.
1938 	 */
1939 	ASSERT(agino != 0);
1940 	agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
1941 	offset = offsetof(xfs_agi_t, agi_unlinked) +
1942 		(sizeof(xfs_agino_t) * bucket_index);
1943 	xfs_trans_log_buf(tp, agibp, offset,
1944 			  (offset + sizeof(xfs_agino_t) - 1));
1945 	return 0;
1946 }
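/*
 * Illustrative note (not part of the original source): the AGI keeps
 * XFS_AGI_UNLINKED_BUCKETS (64) list heads, and an inode hashes to a
 * bucket purely by its AG-relative inode number.  For example, assuming
 * agino == 137:
 *
 *	bucket_index = 137 % 64 = 9
 *
 * so agi_unlinked[9] is made to point at this inode, and the inode's
 * di_next_unlinked takes over whatever agi_unlinked[9] pointed at before.
 */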
1947 
1948 /*
1949  * Pull the on-disk inode from the AGI unlinked list.
1950  */
1951 STATIC int
1952 xfs_iunlink_remove(
1953 	xfs_trans_t	*tp,
1954 	xfs_inode_t	*ip)
1955 {
1956 	xfs_ino_t	next_ino;
1957 	xfs_mount_t	*mp;
1958 	xfs_agi_t	*agi;
1959 	xfs_dinode_t	*dip;
1960 	xfs_buf_t	*agibp;
1961 	xfs_buf_t	*ibp;
1962 	xfs_agnumber_t	agno;
1963 	xfs_daddr_t	agdaddr;
1964 	xfs_agino_t	agino;
1965 	xfs_agino_t	next_agino;
1966 	xfs_buf_t	*last_ibp;
1967 	xfs_dinode_t	*last_dip = NULL;
1968 	short		bucket_index;
1969 	int		offset, last_offset = 0;
1970 	int		error;
1971 	int		agi_ok;
1972 
1973 	/*
1974 	 * First pull the on-disk inode from the AGI unlinked list.
1975 	 */
1976 	mp = tp->t_mountp;
1977 
1978 	agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
1979 	agdaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp));
1980 
1981 	/*
1982 	 * Get the agi buffer first.  It ensures lock ordering
1983 	 * on the list.
1984 	 */
1985 	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, agdaddr,
1986 				   XFS_FSS_TO_BB(mp, 1), 0, &agibp);
1987 	if (error) {
1988 		cmn_err(CE_WARN,
1989 			"xfs_iunlink_remove: xfs_trans_read_buf()  returned an error %d on %s.  Returning error.",
1990 			error, mp->m_fsname);
1991 		return error;
1992 	}
1993 	/*
1994 	 * Validate the magic number of the agi block.
1995 	 */
1996 	agi = XFS_BUF_TO_AGI(agibp);
1997 	agi_ok =
1998 		be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC &&
1999 		XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum));
2000 	if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK_REMOVE,
2001 			XFS_RANDOM_IUNLINK_REMOVE))) {
2002 		XFS_CORRUPTION_ERROR("xfs_iunlink_remove", XFS_ERRLEVEL_LOW,
2003 				     mp, agi);
2004 		xfs_trans_brelse(tp, agibp);
2005 		cmn_err(CE_WARN,
2006 			"xfs_iunlink_remove: XFS_TEST_ERROR()  returned an error on %s.  Returning EFSCORRUPTED.",
2007 			 mp->m_fsname);
2008 		return XFS_ERROR(EFSCORRUPTED);
2009 	}
2010 	/*
2011 	 * Get the index into the agi hash table for the
2012 	 * list this inode will go on.
2013 	 */
2014 	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2015 	ASSERT(agino != 0);
2016 	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2017 	ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO);
2018 	ASSERT(agi->agi_unlinked[bucket_index]);
2019 
2020 	if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
2021 		/*
2022 		 * We're at the head of the list.  Get the inode's
2023 		 * on-disk buffer to see if there is anyone after us
2024 		 * on the list.  Only modify our next pointer if it
2025 		 * is not already NULLAGINO.  This saves us the overhead
2026 		 * of dealing with the buffer when there is no need to
2027 		 * change it.
2028 		 */
2029 		error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0);
2030 		if (error) {
2031 			cmn_err(CE_WARN,
2032 				"xfs_iunlink_remove: xfs_itobp()  returned an error %d on %s.  Returning error.",
2033 				error, mp->m_fsname);
2034 			return error;
2035 		}
2036 		next_agino = INT_GET(dip->di_next_unlinked, ARCH_CONVERT);
2037 		ASSERT(next_agino != 0);
2038 		if (next_agino != NULLAGINO) {
2039 			INT_SET(dip->di_next_unlinked, ARCH_CONVERT, NULLAGINO);
2040 			offset = ip->i_boffset +
2041 				offsetof(xfs_dinode_t, di_next_unlinked);
2042 			xfs_trans_inode_buf(tp, ibp);
2043 			xfs_trans_log_buf(tp, ibp, offset,
2044 					  (offset + sizeof(xfs_agino_t) - 1));
2045 			xfs_inobp_check(mp, ibp);
2046 		} else {
2047 			xfs_trans_brelse(tp, ibp);
2048 		}
2049 		/*
2050 		 * Point the bucket head pointer at the next inode.
2051 		 */
2052 		ASSERT(next_agino != 0);
2053 		ASSERT(next_agino != agino);
2054 		agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
2055 		offset = offsetof(xfs_agi_t, agi_unlinked) +
2056 			(sizeof(xfs_agino_t) * bucket_index);
2057 		xfs_trans_log_buf(tp, agibp, offset,
2058 				  (offset + sizeof(xfs_agino_t) - 1));
2059 	} else {
2060 		/*
2061 		 * We need to search the list for the inode being freed.
2062 		 */
2063 		next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2064 		last_ibp = NULL;
2065 		while (next_agino != agino) {
2066 			/*
2067 			 * If the last inode wasn't the one pointing to
2068 			 * us, then release its buffer since we're not
2069 			 * going to do anything with it.
2070 			 */
2071 			if (last_ibp != NULL) {
2072 				xfs_trans_brelse(tp, last_ibp);
2073 			}
2074 			next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);
2075 			error = xfs_inotobp(mp, tp, next_ino, &last_dip,
2076 					    &last_ibp, &last_offset);
2077 			if (error) {
2078 				cmn_err(CE_WARN,
2079 			"xfs_iunlink_remove: xfs_inotobp()  returned an error %d on %s.  Returning error.",
2080 					error, mp->m_fsname);
2081 				return error;
2082 			}
2083 			next_agino = INT_GET(last_dip->di_next_unlinked, ARCH_CONVERT);
2084 			ASSERT(next_agino != NULLAGINO);
2085 			ASSERT(next_agino != 0);
2086 		}
2087 		/*
2088 		 * Now last_ibp points to the buffer previous to us on
2089 		 * the unlinked list.  Pull us from the list.
2090 		 */
2091 		error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0);
2092 		if (error) {
2093 			cmn_err(CE_WARN,
2094 				"xfs_iunlink_remove: xfs_itobp()  returned an error %d on %s.  Returning error.",
2095 				error, mp->m_fsname);
2096 			return error;
2097 		}
2098 		next_agino = INT_GET(dip->di_next_unlinked, ARCH_CONVERT);
2099 		ASSERT(next_agino != 0);
2100 		ASSERT(next_agino != agino);
2101 		if (next_agino != NULLAGINO) {
2102 			INT_SET(dip->di_next_unlinked, ARCH_CONVERT, NULLAGINO);
2103 			offset = ip->i_boffset +
2104 				offsetof(xfs_dinode_t, di_next_unlinked);
2105 			xfs_trans_inode_buf(tp, ibp);
2106 			xfs_trans_log_buf(tp, ibp, offset,
2107 					  (offset + sizeof(xfs_agino_t) - 1));
2108 			xfs_inobp_check(mp, ibp);
2109 		} else {
2110 			xfs_trans_brelse(tp, ibp);
2111 		}
2112 		/*
2113 		 * Point the previous inode on the list to the next inode.
2114 		 */
2115 		INT_SET(last_dip->di_next_unlinked, ARCH_CONVERT, next_agino);
2116 		ASSERT(next_agino != 0);
2117 		offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
2118 		xfs_trans_inode_buf(tp, last_ibp);
2119 		xfs_trans_log_buf(tp, last_ibp, offset,
2120 				  (offset + sizeof(xfs_agino_t) - 1));
2121 		xfs_inobp_check(mp, last_ibp);
2122 	}
2123 	return 0;
2124 }
2125 
2126 static __inline__ int xfs_inode_clean(xfs_inode_t *ip)
2127 {
2128 	return (((ip->i_itemp == NULL) ||
2129 		!(ip->i_itemp->ili_format.ilf_fields & XFS_ILOG_ALL)) &&
2130 		(ip->i_update_core == 0));
2131 }
2132 
2133 STATIC void
2134 xfs_ifree_cluster(
2135 	xfs_inode_t	*free_ip,
2136 	xfs_trans_t	*tp,
2137 	xfs_ino_t	inum)
2138 {
2139 	xfs_mount_t		*mp = free_ip->i_mount;
2140 	int			blks_per_cluster;
2141 	int			nbufs;
2142 	int			ninodes;
2143 	int			i, j, found, pre_flushed;
2144 	xfs_daddr_t		blkno;
2145 	xfs_buf_t		*bp;
2146 	xfs_ihash_t		*ih;
2147 	xfs_inode_t		*ip, **ip_found;
2148 	xfs_inode_log_item_t	*iip;
2149 	xfs_log_item_t		*lip;
2150 	SPLDECL(s);
2151 
2152 	if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
2153 		blks_per_cluster = 1;
2154 		ninodes = mp->m_sb.sb_inopblock;
2155 		nbufs = XFS_IALLOC_BLOCKS(mp);
2156 	} else {
2157 		blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
2158 					mp->m_sb.sb_blocksize;
2159 		ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
2160 		nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster;
2161 	}
2162 
2163 	ip_found = kmem_alloc(ninodes * sizeof(xfs_inode_t *), KM_NOFS);
2164 
2165 	for (j = 0; j < nbufs; j++, inum += ninodes) {
2166 		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
2167 					 XFS_INO_TO_AGBNO(mp, inum));
2168 
2169 
2170 		/*
2171 		 * Look for each inode in memory and attempt to lock it;
2172 		 * we can be racing with flush and tail pushing here.
2173 		 * Any inode we get the locks on is added to an array
2174 		 * of inodes to process later.
2175 		 *
2176 		 * When we get the buffer lock below, we could beat a
2177 		 * flush or tail pushing thread to it, in which case
2178 		 * they will go looking for the inode buffer and fail;
2179 		 * we need some other form of
2180 		 * interlock here.
2181 		 */
2182 		found = 0;
2183 		for (i = 0; i < ninodes; i++) {
2184 			ih = XFS_IHASH(mp, inum + i);
2185 			read_lock(&ih->ih_lock);
2186 			for (ip = ih->ih_next; ip != NULL; ip = ip->i_next) {
2187 				if (ip->i_ino == inum + i)
2188 					break;
2189 			}
2190 
2191 			/* Inode not in memory or we found it already,
2192 			 * nothing to do
2193 			 */
2194 			if (!ip || (ip->i_flags & XFS_ISTALE)) {
2195 				read_unlock(&ih->ih_lock);
2196 				continue;
2197 			}
2198 
2199 			if (xfs_inode_clean(ip)) {
2200 				read_unlock(&ih->ih_lock);
2201 				continue;
2202 			}
2203 
2204 			/* If we can get the locks then add it to the
2205 			 * list; otherwise, by the time we get the bp lock
2206 			 * below it will already be attached to the
2207 			 * inode buffer.
2208 			 */
2209 
2210 			/* This inode will already be locked - by us, lets
2211 			/* This inode will already be locked - by us; let's
2212 			 */
2213 
2214 			if (ip == free_ip) {
2215 				if (xfs_iflock_nowait(ip)) {
2216 					ip->i_flags |= XFS_ISTALE;
2217 
2218 					if (xfs_inode_clean(ip)) {
2219 						xfs_ifunlock(ip);
2220 					} else {
2221 						ip_found[found++] = ip;
2222 					}
2223 				}
2224 				read_unlock(&ih->ih_lock);
2225 				continue;
2226 			}
2227 
2228 			if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2229 				if (xfs_iflock_nowait(ip)) {
2230 					ip->i_flags |= XFS_ISTALE;
2231 
2232 					if (xfs_inode_clean(ip)) {
2233 						xfs_ifunlock(ip);
2234 						xfs_iunlock(ip, XFS_ILOCK_EXCL);
2235 					} else {
2236 						ip_found[found++] = ip;
2237 					}
2238 				} else {
2239 					xfs_iunlock(ip, XFS_ILOCK_EXCL);
2240 				}
2241 			}
2242 
2243 			read_unlock(&ih->ih_lock);
2244 		}
2245 
2246 		bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2247 					mp->m_bsize * blks_per_cluster,
2248 					XFS_BUF_LOCK);
2249 
2250 		pre_flushed = 0;
2251 		lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
2252 		while (lip) {
2253 			if (lip->li_type == XFS_LI_INODE) {
2254 				iip = (xfs_inode_log_item_t *)lip;
2255 				ASSERT(iip->ili_logged == 1);
2256 				lip->li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*)) xfs_istale_done;
2257 				AIL_LOCK(mp,s);
2258 				iip->ili_flush_lsn = iip->ili_item.li_lsn;
2259 				AIL_UNLOCK(mp, s);
2260 				iip->ili_inode->i_flags |= XFS_ISTALE;
2261 				pre_flushed++;
2262 			}
2263 			lip = lip->li_bio_list;
2264 		}
2265 
2266 		for (i = 0; i < found; i++) {
2267 			ip = ip_found[i];
2268 			iip = ip->i_itemp;
2269 
2270 			if (!iip) {
2271 				ip->i_update_core = 0;
2272 				xfs_ifunlock(ip);
2273 				xfs_iunlock(ip, XFS_ILOCK_EXCL);
2274 				continue;
2275 			}
2276 
2277 			iip->ili_last_fields = iip->ili_format.ilf_fields;
2278 			iip->ili_format.ilf_fields = 0;
2279 			iip->ili_logged = 1;
2280 			AIL_LOCK(mp,s);
2281 			iip->ili_flush_lsn = iip->ili_item.li_lsn;
2282 			AIL_UNLOCK(mp, s);
2283 
2284 			xfs_buf_attach_iodone(bp,
2285 				(void(*)(xfs_buf_t*,xfs_log_item_t*))
2286 				xfs_istale_done, (xfs_log_item_t *)iip);
2287 			if (ip != free_ip) {
2288 				xfs_iunlock(ip, XFS_ILOCK_EXCL);
2289 			}
2290 		}
2291 
2292 		if (found || pre_flushed)
2293 			xfs_trans_stale_inode_buf(tp, bp);
2294 		xfs_trans_binval(tp, bp);
2295 	}
2296 
2297 	kmem_free(ip_found, ninodes * sizeof(xfs_inode_t *));
2298 }
2299 
2300 /*
2301  * This is called to return an inode to the inode free list.
2302  * The inode should already be truncated to 0 length and have
2303  * no pages associated with it.  This routine also assumes that
2304  * the inode is already a part of the transaction.
2305  *
2306  * The on-disk copy of the inode will have been added to the list
2307  * of unlinked inodes in the AGI. We need to remove the inode from
2308  * that list atomically with respect to freeing it here.
2309  */
2310 int
2311 xfs_ifree(
2312 	xfs_trans_t	*tp,
2313 	xfs_inode_t	*ip,
2314 	xfs_bmap_free_t	*flist)
2315 {
2316 	int			error;
2317 	int			delete;
2318 	xfs_ino_t		first_ino;
2319 
2320 	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
2321 	ASSERT(ip->i_transp == tp);
2322 	ASSERT(ip->i_d.di_nlink == 0);
2323 	ASSERT(ip->i_d.di_nextents == 0);
2324 	ASSERT(ip->i_d.di_anextents == 0);
2325 	ASSERT((ip->i_d.di_size == 0) ||
2326 	       ((ip->i_d.di_mode & S_IFMT) != S_IFREG));
2327 	ASSERT(ip->i_d.di_nblocks == 0);
2328 
2329 	/*
2330 	 * Pull the on-disk inode from the AGI unlinked list.
2331 	 */
2332 	error = xfs_iunlink_remove(tp, ip);
2333 	if (error != 0) {
2334 		return error;
2335 	}
2336 
2337 	error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino);
2338 	if (error != 0) {
2339 		return error;
2340 	}
2341 	ip->i_d.di_mode = 0;		/* mark incore inode as free */
2342 	ip->i_d.di_flags = 0;
2343 	ip->i_d.di_dmevmask = 0;
2344 	ip->i_d.di_forkoff = 0;		/* mark the attr fork not in use */
2345 	ip->i_df.if_ext_max =
2346 		XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
2347 	ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
2348 	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
2349 	/*
2350 	 * Bump the generation count so no one will be confused
2351 	 * by reincarnations of this inode.
2352 	 */
2353 	ip->i_d.di_gen++;
2354 	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2355 
2356 	if (delete) {
2357 		xfs_ifree_cluster(ip, tp, first_ino);
2358 	}
2359 
2360 	return 0;
2361 }
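/*
 * Illustrative sketch (not part of the original source): a caller is
 * expected to provide a bmap free list and finish it after xfs_ifree()
 * returns, so the inode chunk blocks queued by xfs_difree() actually get
 * freed.  The caller-side sequence below is an assumption about usage,
 * built only from calls and macros that already appear in this file.
 *
 *	XFS_BMAP_INIT(&free_list, &first_block);
 *	error = xfs_ifree(tp, ip, &free_list);
 *	if (!error)
 *		error = xfs_bmap_finish(&tp, &free_list, first_block,
 *					&committed);
 *	else
 *		xfs_bmap_cancel(&free_list);
 */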
2362 
2363 /*
2364  * Reallocate the space for if_broot based on the number of records
2365  * being added or deleted as indicated in rec_diff.  Move the records
2366  * and pointers in if_broot to fit the new size.  When shrinking this
2367  * will eliminate holes between the records and pointers created by
2368  * the caller.  When growing this will create holes to be filled in
2369  * by the caller.
2370  *
2371  * The caller must not request to add more records than would fit in
2372  * the on-disk inode root.  If the if_broot is currently NULL, then
2373  * if we are adding records one will be allocated.  The caller must also
2374  * not request that the number of records go below zero, although
2375  * it can go to zero.
2376  *
2377  * ip -- the inode whose if_broot area is changing
2378  * rec_diff -- the change in the number of records, positive or negative,
2379  *	 requested for the if_broot array.
2380  */
2381 void
2382 xfs_iroot_realloc(
2383 	xfs_inode_t		*ip,
2384 	int			rec_diff,
2385 	int			whichfork)
2386 {
2387 	int			cur_max;
2388 	xfs_ifork_t		*ifp;
2389 	xfs_bmbt_block_t	*new_broot;
2390 	int			new_max;
2391 	size_t			new_size;
2392 	char			*np;
2393 	char			*op;
2394 
2395 	/*
2396 	 * Handle the degenerate case quietly.
2397 	 */
2398 	if (rec_diff == 0) {
2399 		return;
2400 	}
2401 
2402 	ifp = XFS_IFORK_PTR(ip, whichfork);
2403 	if (rec_diff > 0) {
2404 		/*
2405 		 * If there wasn't any memory allocated before, just
2406 		 * allocate it now and get out.
2407 		 */
2408 		if (ifp->if_broot_bytes == 0) {
2409 			new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(rec_diff);
2410 			ifp->if_broot = (xfs_bmbt_block_t*)kmem_alloc(new_size,
2411 								     KM_SLEEP);
2412 			ifp->if_broot_bytes = (int)new_size;
2413 			return;
2414 		}
2415 
2416 		/*
2417 		 * If there is already an existing if_broot, then we need
2418 		 * to realloc() it and shift the pointers to their new
2419 		 * location.  The records don't change location because
2420 		 * they are kept butted up against the btree block header.
2421 		 */
2422 		cur_max = XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes);
2423 		new_max = cur_max + rec_diff;
2424 		new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
2425 		ifp->if_broot = (xfs_bmbt_block_t *)
2426 		  kmem_realloc(ifp->if_broot,
2427 				new_size,
2428 				(size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max), /* old size */
2429 				KM_SLEEP);
2430 		op = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1,
2431 						      ifp->if_broot_bytes);
2432 		np = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1,
2433 						      (int)new_size);
2434 		ifp->if_broot_bytes = (int)new_size;
2435 		ASSERT(ifp->if_broot_bytes <=
2436 			XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
2437 		memmove(np, op, cur_max * (uint)sizeof(xfs_dfsbno_t));
2438 		return;
2439 	}
2440 
2441 	/*
2442 	 * rec_diff is less than 0.  In this case, we are shrinking the
2443 	 * if_broot buffer.  It must already exist.  If we go to zero
2444 	 * records, just get rid of the root and clear the status bit.
2445 	 */
2446 	ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0));
2447 	cur_max = XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes);
2448 	new_max = cur_max + rec_diff;
2449 	ASSERT(new_max >= 0);
2450 	if (new_max > 0)
2451 		new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
2452 	else
2453 		new_size = 0;
2454 	if (new_size > 0) {
2455 		new_broot = (xfs_bmbt_block_t *)kmem_alloc(new_size, KM_SLEEP);
2456 		/*
2457 		 * First copy over the btree block header.
2458 		 */
2459 		memcpy(new_broot, ifp->if_broot, sizeof(xfs_bmbt_block_t));
2460 	} else {
2461 		new_broot = NULL;
2462 		ifp->if_flags &= ~XFS_IFBROOT;
2463 	}
2464 
2465 	/*
2466 	 * Only copy the records and pointers if there are any.
2467 	 */
2468 	if (new_max > 0) {
2469 		/*
2470 		 * First copy the records.
2471 		 */
2472 		op = (char *)XFS_BMAP_BROOT_REC_ADDR(ifp->if_broot, 1,
2473 						     ifp->if_broot_bytes);
2474 		np = (char *)XFS_BMAP_BROOT_REC_ADDR(new_broot, 1,
2475 						     (int)new_size);
2476 		memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t));
2477 
2478 		/*
2479 		 * Then copy the pointers.
2480 		 */
2481 		op = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1,
2482 						     ifp->if_broot_bytes);
2483 		np = (char *)XFS_BMAP_BROOT_PTR_ADDR(new_broot, 1,
2484 						     (int)new_size);
2485 		memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t));
2486 	}
2487 	kmem_free(ifp->if_broot, ifp->if_broot_bytes);
2488 	ifp->if_broot = new_broot;
2489 	ifp->if_broot_bytes = (int)new_size;
2490 	ASSERT(ifp->if_broot_bytes <=
2491 		XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
2492 	return;
2493 }
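/*
 * Illustrative sketch (not part of the original source): callers pass the
 * change in record count, so growing the incore root by one record before a
 * btree insert, and shrinking it again after a delete, would look like:
 *
 *	xfs_iroot_realloc(ip, 1, XFS_DATA_FORK);	(grow by one record)
 *	xfs_iroot_realloc(ip, -1, XFS_DATA_FORK);	(shrink by one record)
 */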
2494 
2495 
2496 /*
2497  * This is called when the amount of space needed for if_data
2498  * is increased or decreased.  The change in size is indicated by
2499  * the number of bytes that need to be added or deleted in the
2500  * byte_diff parameter.
2501  *
2502  * If the amount of space needed has decreased below the size of the
2503  * inline buffer, then switch to using the inline buffer.  Otherwise,
2504  * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer
2505  * to what is needed.
2506  *
2507  * ip -- the inode whose if_data area is changing
2508  * byte_diff -- the change in the number of bytes, positive or negative,
2509  *	 requested for the if_data array.
2510  */
2511 void
2512 xfs_idata_realloc(
2513 	xfs_inode_t	*ip,
2514 	int		byte_diff,
2515 	int		whichfork)
2516 {
2517 	xfs_ifork_t	*ifp;
2518 	int		new_size;
2519 	int		real_size;
2520 
2521 	if (byte_diff == 0) {
2522 		return;
2523 	}
2524 
2525 	ifp = XFS_IFORK_PTR(ip, whichfork);
2526 	new_size = (int)ifp->if_bytes + byte_diff;
2527 	ASSERT(new_size >= 0);
2528 
2529 	if (new_size == 0) {
2530 		if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2531 			kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes);
2532 		}
2533 		ifp->if_u1.if_data = NULL;
2534 		real_size = 0;
2535 	} else if (new_size <= sizeof(ifp->if_u2.if_inline_data)) {
2536 		/*
2537 		 * If the valid extents/data can fit in if_inline_ext/data,
2538 		 * copy them from the malloc'd vector and free it.
2539 		 */
2540 		if (ifp->if_u1.if_data == NULL) {
2541 			ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
2542 		} else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2543 			ASSERT(ifp->if_real_bytes != 0);
2544 			memcpy(ifp->if_u2.if_inline_data, ifp->if_u1.if_data,
2545 			      new_size);
2546 			kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes);
2547 			ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
2548 		}
2549 		real_size = 0;
2550 	} else {
2551 		/*
2552 		 * Stuck with malloc/realloc.
2553 		 * For inline data, the underlying buffer must be
2554 		 * a multiple of 4 bytes in size so that it can be
2555 		 * logged and stay on word boundaries.  We enforce
2556 		 * that here.
2557 		 */
2558 		real_size = roundup(new_size, 4);
2559 		if (ifp->if_u1.if_data == NULL) {
2560 			ASSERT(ifp->if_real_bytes == 0);
2561 			ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
2562 		} else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2563 			/*
2564 			 * Only do the realloc if the underlying size
2565 			 * is really changing.
2566 			 */
2567 			if (ifp->if_real_bytes != real_size) {
2568 				ifp->if_u1.if_data =
2569 					kmem_realloc(ifp->if_u1.if_data,
2570 							real_size,
2571 							ifp->if_real_bytes,
2572 							KM_SLEEP);
2573 			}
2574 		} else {
2575 			ASSERT(ifp->if_real_bytes == 0);
2576 			ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
2577 			memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data,
2578 				ifp->if_bytes);
2579 		}
2580 	}
2581 	ifp->if_real_bytes = real_size;
2582 	ifp->if_bytes = new_size;
2583 	ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
2584 }
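/*
 * Illustrative note (not part of the original source): because the malloc'd
 * if_data buffer must stay a multiple of 4 bytes for logging, a request that
 * grows if_bytes to, say, 10 bytes leaves real_size = roundup(10, 4) = 12,
 * while if_bytes itself records the exact 10 bytes of valid data.
 */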
2585 
2586 
2587 
2588 
2589 /*
2590  * Map inode to disk block and offset.
2591  *
2592  * mp -- the mount point structure for the current file system
2593  * tp -- the current transaction
2594  * ino -- the inode number of the inode to be located
2595  * imap -- this structure is filled in with the information necessary
2596  *	 to retrieve the given inode from disk
2597  * flags -- flags to pass to xfs_dilocate indicating whether or not
2598  *	 lookups in the inode btree were OK or not
2599  */
2600 int
2601 xfs_imap(
2602 	xfs_mount_t	*mp,
2603 	xfs_trans_t	*tp,
2604 	xfs_ino_t	ino,
2605 	xfs_imap_t	*imap,
2606 	uint		flags)
2607 {
2608 	xfs_fsblock_t	fsbno;
2609 	int		len;
2610 	int		off;
2611 	int		error;
2612 
2613 	fsbno = imap->im_blkno ?
2614 		XFS_DADDR_TO_FSB(mp, imap->im_blkno) : NULLFSBLOCK;
2615 	error = xfs_dilocate(mp, tp, ino, &fsbno, &len, &off, flags);
2616 	if (error != 0) {
2617 		return error;
2618 	}
2619 	imap->im_blkno = XFS_FSB_TO_DADDR(mp, fsbno);
2620 	imap->im_len = XFS_FSB_TO_BB(mp, len);
2621 	imap->im_agblkno = XFS_FSB_TO_AGBNO(mp, fsbno);
2622 	imap->im_ioffset = (ushort)off;
2623 	imap->im_boffset = (ushort)(off << mp->m_sb.sb_inodelog);
2624 	return 0;
2625 }
2626 
2627 void
2628 xfs_idestroy_fork(
2629 	xfs_inode_t	*ip,
2630 	int		whichfork)
2631 {
2632 	xfs_ifork_t	*ifp;
2633 
2634 	ifp = XFS_IFORK_PTR(ip, whichfork);
2635 	if (ifp->if_broot != NULL) {
2636 		kmem_free(ifp->if_broot, ifp->if_broot_bytes);
2637 		ifp->if_broot = NULL;
2638 	}
2639 
2640 	/*
2641 	 * If the format is local, then we can't have an extents
2642 	 * array so just look for an inline data array.  If we're
2643 	 * not local then we may or may not have an extents list,
2644 	 * so check and free it up if we do.
2645 	 */
2646 	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
2647 		if ((ifp->if_u1.if_data != ifp->if_u2.if_inline_data) &&
2648 		    (ifp->if_u1.if_data != NULL)) {
2649 			ASSERT(ifp->if_real_bytes != 0);
2650 			kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes);
2651 			ifp->if_u1.if_data = NULL;
2652 			ifp->if_real_bytes = 0;
2653 		}
2654 	} else if ((ifp->if_flags & XFS_IFEXTENTS) &&
2655 		   ((ifp->if_flags & XFS_IFEXTIREC) ||
2656 		    ((ifp->if_u1.if_extents != NULL) &&
2657 		     (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)))) {
2658 		ASSERT(ifp->if_real_bytes != 0);
2659 		xfs_iext_destroy(ifp);
2660 	}
2661 	ASSERT(ifp->if_u1.if_extents == NULL ||
2662 	       ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext);
2663 	ASSERT(ifp->if_real_bytes == 0);
2664 	if (whichfork == XFS_ATTR_FORK) {
2665 		kmem_zone_free(xfs_ifork_zone, ip->i_afp);
2666 		ip->i_afp = NULL;
2667 	}
2668 }
2669 
2670 /*
2671  * This is called to free all the memory associated with an inode.
2672  * It must free the inode itself and any buffers allocated for
2673  * if_extents/if_data and if_broot.  It must also free the lock
2674  * associated with the inode.
2675  */
2676 void
2677 xfs_idestroy(
2678 	xfs_inode_t	*ip)
2679 {
2680 
2681 	switch (ip->i_d.di_mode & S_IFMT) {
2682 	case S_IFREG:
2683 	case S_IFDIR:
2684 	case S_IFLNK:
2685 		xfs_idestroy_fork(ip, XFS_DATA_FORK);
2686 		break;
2687 	}
2688 	if (ip->i_afp)
2689 		xfs_idestroy_fork(ip, XFS_ATTR_FORK);
2690 	mrfree(&ip->i_lock);
2691 	mrfree(&ip->i_iolock);
2692 	freesema(&ip->i_flock);
2693 #ifdef XFS_BMAP_TRACE
2694 	ktrace_free(ip->i_xtrace);
2695 #endif
2696 #ifdef XFS_BMBT_TRACE
2697 	ktrace_free(ip->i_btrace);
2698 #endif
2699 #ifdef XFS_RW_TRACE
2700 	ktrace_free(ip->i_rwtrace);
2701 #endif
2702 #ifdef XFS_ILOCK_TRACE
2703 	ktrace_free(ip->i_lock_trace);
2704 #endif
2705 #ifdef XFS_DIR2_TRACE
2706 	ktrace_free(ip->i_dir_trace);
2707 #endif
2708 	if (ip->i_itemp) {
2709 		/* XXXdpd should be able to assert this but shutdown
2710 		 * is leaving the AIL behind. */
2711 		ASSERT(((ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL) == 0) ||
2712 		       XFS_FORCED_SHUTDOWN(ip->i_mount));
2713 		xfs_inode_item_destroy(ip);
2714 	}
2715 	kmem_zone_free(xfs_inode_zone, ip);
2716 }
2717 
2718 
2719 /*
2720  * Increment the pin count of the given inode.
2721  * The pin count is kept in i_pincount, an atomic counter on the inode.
2722  */
2723 void
2724 xfs_ipin(
2725 	xfs_inode_t	*ip)
2726 {
2727 	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
2728 
2729 	atomic_inc(&ip->i_pincount);
2730 }
2731 
2732 /*
2733  * Decrement the pin count of the given inode, and wake up
2734  * anyone in xfs_iunpin_wait() if the count goes to 0.  The
2735  * inode must have been previously pinned with a call to xfs_ipin().
2736  */
2737 void
2738 xfs_iunpin(
2739 	xfs_inode_t	*ip)
2740 {
2741 	ASSERT(atomic_read(&ip->i_pincount) > 0);
2742 
2743 	if (atomic_dec_and_test(&ip->i_pincount)) {
2744 		/*
2745 		 * If the inode is currently being reclaimed, the
2746 		 * linux inode _and_ the xfs vnode may have been
2747 		 * freed so we cannot reference either of them safely.
2748 		 * Hence we should not try to do anything to them
2749 		 * if the xfs inode is currently in the reclaim
2750 		 * path.
2751 		 *
2752 		 * However, we still need to issue the unpin wakeup
2753 		 * call as the inode reclaim may be blocked waiting for
2754 		 * the inode to become unpinned.
2755 		 */
2756 		if (!(ip->i_flags & (XFS_IRECLAIM|XFS_IRECLAIMABLE))) {
2757 			bhv_vnode_t	*vp = XFS_ITOV_NULL(ip);
2758 
2759 			/* make sync come back and flush this inode */
2760 			if (vp) {
2761 				struct inode	*inode = vn_to_inode(vp);
2762 
2763 				if (!(inode->i_state &
2764 						(I_NEW|I_FREEING|I_CLEAR)))
2765 					mark_inode_dirty_sync(inode);
2766 			}
2767 		}
2768 		wake_up(&ip->i_ipin_wait);
2769 	}
2770 }
2771 
2772 /*
2773  * This is called to wait for the given inode to be unpinned.
2774  * It will sleep until this happens.  The caller must have the
2775  * inode locked in at least shared mode so that the inode cannot
2776  * be subsequently pinned once someone is waiting for it to be
2777  * unpinned.
2778  */
2779 STATIC void
2780 xfs_iunpin_wait(
2781 	xfs_inode_t	*ip)
2782 {
2783 	xfs_inode_log_item_t	*iip;
2784 	xfs_lsn_t	lsn;
2785 
2786 	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE | MR_ACCESS));
2787 
2788 	if (atomic_read(&ip->i_pincount) == 0) {
2789 		return;
2790 	}
2791 
2792 	iip = ip->i_itemp;
2793 	if (iip && iip->ili_last_lsn) {
2794 		lsn = iip->ili_last_lsn;
2795 	} else {
2796 		lsn = (xfs_lsn_t)0;
2797 	}
2798 
2799 	/*
2800 	 * Give the log a push so we don't wait here too long.
2801 	 */
2802 	xfs_log_force(ip->i_mount, lsn, XFS_LOG_FORCE);
2803 
2804 	wait_event(ip->i_ipin_wait, (atomic_read(&ip->i_pincount) == 0));
2805 }
2806 
2807 
2808 /*
2809  * xfs_iextents_copy()
2810  *
2811  * This is called to copy the REAL extents (as opposed to the delayed
2812  * allocation extents) from the inode into the given buffer.  It
2813  * returns the number of bytes copied into the buffer.
2814  *
2815  * If there are no delayed allocation extents, then we can just
2816  * memcpy() the extents into the buffer.  Otherwise, we need to
2817  * examine each extent in turn and skip those which are delayed.
2818  */
2819 int
2820 xfs_iextents_copy(
2821 	xfs_inode_t		*ip,
2822 	xfs_bmbt_rec_t		*buffer,
2823 	int			whichfork)
2824 {
2825 	int			copied;
2826 	xfs_bmbt_rec_t		*dest_ep;
2827 	xfs_bmbt_rec_t		*ep;
2828 #ifdef XFS_BMAP_TRACE
2829 	static char		fname[] = "xfs_iextents_copy";
2830 #endif
2831 	int			i;
2832 	xfs_ifork_t		*ifp;
2833 	int			nrecs;
2834 	xfs_fsblock_t		start_block;
2835 
2836 	ifp = XFS_IFORK_PTR(ip, whichfork);
2837 	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
2838 	ASSERT(ifp->if_bytes > 0);
2839 
2840 	nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
2841 	xfs_bmap_trace_exlist(fname, ip, nrecs, whichfork);
2842 	ASSERT(nrecs > 0);
2843 
2844 	/*
2845 	 * Copy the extents one at a time, skipping any delayed
2846 	 * allocation extents (those with a null start block).
2847 	 * There must be at least one real (non-delayed)
2848 	 * extent.
2849 	 */
2850 	dest_ep = buffer;
2851 	copied = 0;
2852 	for (i = 0; i < nrecs; i++) {
2853 		ep = xfs_iext_get_ext(ifp, i);
2854 		start_block = xfs_bmbt_get_startblock(ep);
2855 		if (ISNULLSTARTBLOCK(start_block)) {
2856 			/*
2857 			 * It's a delayed allocation extent, so skip it.
2858 			 */
2859 			continue;
2860 		}
2861 
2862 		/* Translate to on disk format */
2863 		put_unaligned(INT_GET(ep->l0, ARCH_CONVERT),
2864 			      (__uint64_t*)&dest_ep->l0);
2865 		put_unaligned(INT_GET(ep->l1, ARCH_CONVERT),
2866 			      (__uint64_t*)&dest_ep->l1);
2867 		dest_ep++;
2868 		copied++;
2869 	}
2870 	ASSERT(copied != 0);
2871 	xfs_validate_extents(ifp, copied, 1, XFS_EXTFMT_INODE(ip));
2872 
2873 	return (copied * (uint)sizeof(xfs_bmbt_rec_t));
2874 }
2875 
2876 /*
2877  * Each of the following cases stores data into the same region
2878  * of the on-disk inode, so only one of them can be valid at
2879  * any given time. While it is possible to have conflicting formats
2880  * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is
2881  * in EXTENTS format, this can only happen when the fork has
2882  * changed formats after being modified but before being flushed.
2883  * In these cases, the format always takes precedence, because the
2884  * format indicates the current state of the fork.
2885  */
2886 /*ARGSUSED*/
2887 STATIC int
2888 xfs_iflush_fork(
2889 	xfs_inode_t		*ip,
2890 	xfs_dinode_t		*dip,
2891 	xfs_inode_log_item_t	*iip,
2892 	int			whichfork,
2893 	xfs_buf_t		*bp)
2894 {
2895 	char			*cp;
2896 	xfs_ifork_t		*ifp;
2897 	xfs_mount_t		*mp;
2898 #ifdef XFS_TRANS_DEBUG
2899 	int			first;
2900 #endif
2901 	static const short	brootflag[2] =
2902 		{ XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
2903 	static const short	dataflag[2] =
2904 		{ XFS_ILOG_DDATA, XFS_ILOG_ADATA };
2905 	static const short	extflag[2] =
2906 		{ XFS_ILOG_DEXT, XFS_ILOG_AEXT };
2907 
2908 	if (iip == NULL)
2909 		return 0;
2910 	ifp = XFS_IFORK_PTR(ip, whichfork);
2911 	/*
2912 	 * This can happen if we gave up in iformat in an error path,
2913 	 * for the attribute fork.
2914 	 */
2915 	if (ifp == NULL) {
2916 		ASSERT(whichfork == XFS_ATTR_FORK);
2917 		return 0;
2918 	}
2919 	cp = XFS_DFORK_PTR(dip, whichfork);
2920 	mp = ip->i_mount;
2921 	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
2922 	case XFS_DINODE_FMT_LOCAL:
2923 		if ((iip->ili_format.ilf_fields & dataflag[whichfork]) &&
2924 		    (ifp->if_bytes > 0)) {
2925 			ASSERT(ifp->if_u1.if_data != NULL);
2926 			ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
2927 			memcpy(cp, ifp->if_u1.if_data, ifp->if_bytes);
2928 		}
2929 		break;
2930 
2931 	case XFS_DINODE_FMT_EXTENTS:
2932 		ASSERT((ifp->if_flags & XFS_IFEXTENTS) ||
2933 		       !(iip->ili_format.ilf_fields & extflag[whichfork]));
2934 		ASSERT((xfs_iext_get_ext(ifp, 0) != NULL) ||
2935 			(ifp->if_bytes == 0));
2936 		ASSERT((xfs_iext_get_ext(ifp, 0) == NULL) ||
2937 			(ifp->if_bytes > 0));
2938 		if ((iip->ili_format.ilf_fields & extflag[whichfork]) &&
2939 		    (ifp->if_bytes > 0)) {
2940 			ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0);
2941 			(void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp,
2942 				whichfork);
2943 		}
2944 		break;
2945 
2946 	case XFS_DINODE_FMT_BTREE:
2947 		if ((iip->ili_format.ilf_fields & brootflag[whichfork]) &&
2948 		    (ifp->if_broot_bytes > 0)) {
2949 			ASSERT(ifp->if_broot != NULL);
2950 			ASSERT(ifp->if_broot_bytes <=
2951 			       (XFS_IFORK_SIZE(ip, whichfork) +
2952 				XFS_BROOT_SIZE_ADJ));
2953 			xfs_bmbt_to_bmdr(ifp->if_broot, ifp->if_broot_bytes,
2954 				(xfs_bmdr_block_t *)cp,
2955 				XFS_DFORK_SIZE(dip, mp, whichfork));
2956 		}
2957 		break;
2958 
2959 	case XFS_DINODE_FMT_DEV:
2960 		if (iip->ili_format.ilf_fields & XFS_ILOG_DEV) {
2961 			ASSERT(whichfork == XFS_DATA_FORK);
2962 			INT_SET(dip->di_u.di_dev, ARCH_CONVERT, ip->i_df.if_u2.if_rdev);
2963 		}
2964 		break;
2965 
2966 	case XFS_DINODE_FMT_UUID:
2967 		if (iip->ili_format.ilf_fields & XFS_ILOG_UUID) {
2968 			ASSERT(whichfork == XFS_DATA_FORK);
2969 			memcpy(&dip->di_u.di_muuid, &ip->i_df.if_u2.if_uuid,
2970 				sizeof(uuid_t));
2971 		}
2972 		break;
2973 
2974 	default:
2975 		ASSERT(0);
2976 		break;
2977 	}
2978 
2979 	return 0;
2980 }
2981 
2982 /*
2983  * xfs_iflush() will write a modified inode's changes out to the
2984  * inode's on disk home.  The caller must have the inode lock held
2985  * in at least shared mode and the inode flush semaphore must be
2986  * held as well.  The inode lock will still be held upon return from
2987  * the call and the caller is free to unlock it.
2988  * The inode flush lock will be unlocked when the inode reaches the disk.
2989  * The flags indicate how the inode's buffer should be written out.
2990  */
2991 int
2992 xfs_iflush(
2993 	xfs_inode_t		*ip,
2994 	uint			flags)
2995 {
2996 	xfs_inode_log_item_t	*iip;
2997 	xfs_buf_t		*bp;
2998 	xfs_dinode_t		*dip;
2999 	xfs_mount_t		*mp;
3000 	int			error;
3001 	/* REFERENCED */
3002 	xfs_chash_t		*ch;
3003 	xfs_inode_t		*iq;
3004 	int			clcount;	/* count of inodes clustered */
3005 	int			bufwasdelwri;
3006 	enum { INT_DELWRI = (1 << 0), INT_ASYNC = (1 << 1) };
3007 	SPLDECL(s);
3008 
3009 	XFS_STATS_INC(xs_iflush_count);
3010 
3011 	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
3012 	ASSERT(issemalocked(&(ip->i_flock)));
3013 	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
3014 	       ip->i_d.di_nextents > ip->i_df.if_ext_max);
3015 
3016 	iip = ip->i_itemp;
3017 	mp = ip->i_mount;
3018 
3019 	/*
3020 	 * If the inode isn't dirty, then just release the inode
3021 	 * flush lock and do nothing.
3022 	 */
3023 	if ((ip->i_update_core == 0) &&
3024 	    ((iip == NULL) || !(iip->ili_format.ilf_fields & XFS_ILOG_ALL))) {
3025 		ASSERT((iip != NULL) ?
3026 			 !(iip->ili_item.li_flags & XFS_LI_IN_AIL) : 1);
3027 		xfs_ifunlock(ip);
3028 		return 0;
3029 	}
3030 
3031 	/*
3032 	 * We can't flush the inode until it is unpinned, so
3033 	 * wait for it.  We know no one new can pin it, because
3034 	 * we are holding the inode lock shared and you need
3035 	 * to hold it exclusively to pin the inode.
3036 	 */
3037 	xfs_iunpin_wait(ip);
3038 
3039 	/*
3040 	 * This may have been unpinned because the filesystem is shutting
3041 	 * down forcibly. If that's the case we must not write this inode
3042 	 * to disk, because the log record didn't make it to disk!
3043 	 */
3044 	if (XFS_FORCED_SHUTDOWN(mp)) {
3045 		ip->i_update_core = 0;
3046 		if (iip)
3047 			iip->ili_format.ilf_fields = 0;
3048 		xfs_ifunlock(ip);
3049 		return XFS_ERROR(EIO);
3050 	}
3051 
3052 	/*
3053 	 * Get the buffer containing the on-disk inode.
3054 	 */
3055 	error = xfs_itobp(mp, NULL, ip, &dip, &bp, 0, 0);
3056 	if (error) {
3057 		xfs_ifunlock(ip);
3058 		return error;
3059 	}
3060 
3061 	/*
3062 	 * Decide how buffer will be flushed out.  This is done before
3063 	 * the call to xfs_iflush_int because this field is zeroed by it.
3064 	 */
3065 	if (iip != NULL && iip->ili_format.ilf_fields != 0) {
3066 		/*
3067 		 * Flush out the inode buffer according to the directions
3068 		 * of the caller.  In the cases where the caller has given
3069 		 * us a choice choose the non-delwri case.  This is because
3070 		 * us a choice, choose the non-delwri case.  This is because
3071 		 */
3072 		switch (flags) {
3073 		case XFS_IFLUSH_SYNC:
3074 		case XFS_IFLUSH_DELWRI_ELSE_SYNC:
3075 			flags = 0;
3076 			break;
3077 		case XFS_IFLUSH_ASYNC:
3078 		case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
3079 			flags = INT_ASYNC;
3080 			break;
3081 		case XFS_IFLUSH_DELWRI:
3082 			flags = INT_DELWRI;
3083 			break;
3084 		default:
3085 			ASSERT(0);
3086 			flags = 0;
3087 			break;
3088 		}
3089 	} else {
3090 		switch (flags) {
3091 		case XFS_IFLUSH_DELWRI_ELSE_SYNC:
3092 		case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
3093 		case XFS_IFLUSH_DELWRI:
3094 			flags = INT_DELWRI;
3095 			break;
3096 		case XFS_IFLUSH_ASYNC:
3097 			flags = INT_ASYNC;
3098 			break;
3099 		case XFS_IFLUSH_SYNC:
3100 			flags = 0;
3101 			break;
3102 		default:
3103 			ASSERT(0);
3104 			flags = 0;
3105 			break;
3106 		}
3107 	}
3108 
3109 	/*
3110 	 * First flush out the inode that xfs_iflush was called with.
3111 	 */
3112 	error = xfs_iflush_int(ip, bp);
3113 	if (error) {
3114 		goto corrupt_out;
3115 	}
3116 
3117 	/*
3118 	 * inode clustering:
3119 	 * see if other inodes can be gathered into this write
3120 	 */
3121 
3122 	ip->i_chash->chl_buf = bp;
3123 
3124 	ch = XFS_CHASH(mp, ip->i_blkno);
3125 	s = mutex_spinlock(&ch->ch_lock);
3126 
3127 	clcount = 0;
3128 	for (iq = ip->i_cnext; iq != ip; iq = iq->i_cnext) {
3129 		/*
3130 		 * Do an un-protected check to see if the inode is dirty and
3131 		 * is a candidate for flushing.  These checks will be repeated
3132 		 * later after the appropriate locks are acquired.
3133 		 */
3134 		iip = iq->i_itemp;
3135 		if ((iq->i_update_core == 0) &&
3136 		    ((iip == NULL) ||
3137 		     !(iip->ili_format.ilf_fields & XFS_ILOG_ALL)) &&
3138 		      xfs_ipincount(iq) == 0) {
3139 			continue;
3140 		}
3141 
3142 		/*
3143 		 * Try to get locks.  If any are unavailable,
3144 		 * then this inode cannot be flushed and is skipped.
3145 		 */
3146 
3147 		/* get inode locks (just i_lock) */
3148 		if (xfs_ilock_nowait(iq, XFS_ILOCK_SHARED)) {
3149 			/* get inode flush lock */
3150 			if (xfs_iflock_nowait(iq)) {
3151 				/* check if pinned */
3152 				if (xfs_ipincount(iq) == 0) {
3153 					/* arriving here means that
3154 					 * this inode can be flushed.
3155 					 * first re-check that it's
3156 					 * dirty
3157 					 */
3158 					iip = iq->i_itemp;
3159 					if ((iq->i_update_core != 0)||
3160 					    ((iip != NULL) &&
3161 					     (iip->ili_format.ilf_fields & XFS_ILOG_ALL))) {
3162 						clcount++;
3163 						error = xfs_iflush_int(iq, bp);
3164 						if (error) {
3165 							xfs_iunlock(iq,
3166 								    XFS_ILOCK_SHARED);
3167 							goto cluster_corrupt_out;
3168 						}
3169 					} else {
3170 						xfs_ifunlock(iq);
3171 					}
3172 				} else {
3173 					xfs_ifunlock(iq);
3174 				}
3175 			}
3176 			xfs_iunlock(iq, XFS_ILOCK_SHARED);
3177 		}
3178 	}
3179 	mutex_spinunlock(&ch->ch_lock, s);
3180 
3181 	if (clcount) {
3182 		XFS_STATS_INC(xs_icluster_flushcnt);
3183 		XFS_STATS_ADD(xs_icluster_flushinode, clcount);
3184 	}
3185 
3186 	/*
3187 	 * If the buffer is pinned then push on the log so we won't
3188 	 * get stuck waiting in the write for too long.
3189 	 */
3190 	if (XFS_BUF_ISPINNED(bp)){
3191 		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
3192 	}
3193 
3194 	if (flags & INT_DELWRI) {
3195 		xfs_bdwrite(mp, bp);
3196 	} else if (flags & INT_ASYNC) {
3197 		xfs_bawrite(mp, bp);
3198 	} else {
3199 		error = xfs_bwrite(mp, bp);
3200 	}
3201 	return error;
3202 
3203 corrupt_out:
3204 	xfs_buf_relse(bp);
3205 	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3206 	xfs_iflush_abort(ip);
3207 	/*
3208 	 * Unlocks the flush lock
3209 	 */
3210 	return XFS_ERROR(EFSCORRUPTED);
3211 
3212 cluster_corrupt_out:
3213 	/* Corruption detected in the clustering loop.  Invalidate the
3214 	 * inode buffer and shut down the filesystem.
3215 	 */
3216 	mutex_spinunlock(&ch->ch_lock, s);
3217 
3218 	/*
3219 	 * Clean up the buffer.  If it was B_DELWRI, just release it --
3220 	 * brelse can handle it with no problems.  If not, shut down the
3221 	 * filesystem before releasing the buffer.
3222 	 */
3223 	if ((bufwasdelwri = XFS_BUF_ISDELAYWRITE(bp))) {
3224 		xfs_buf_relse(bp);
3225 	}
3226 
3227 	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3228 
3229 	if (!bufwasdelwri) {
3230 		/*
3231 		 * Just like incore_relse: if we have b_iodone functions,
3232 		 * mark the buffer as an error and call them.  Otherwise
3233 		 * mark it as stale and brelse.
3234 		 */
3235 		if (XFS_BUF_IODONE_FUNC(bp)) {
3236 			XFS_BUF_CLR_BDSTRAT_FUNC(bp);
3237 			XFS_BUF_UNDONE(bp);
3238 			XFS_BUF_STALE(bp);
3239 			XFS_BUF_SHUT(bp);
3240 			XFS_BUF_ERROR(bp,EIO);
3241 			xfs_biodone(bp);
3242 		} else {
3243 			XFS_BUF_STALE(bp);
3244 			xfs_buf_relse(bp);
3245 		}
3246 	}
3247 
3248 	xfs_iflush_abort(iq);
3249 	/*
3250 	 * Unlocks the flush lock
3251 	 */
3252 	return XFS_ERROR(EFSCORRUPTED);
3253 }
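/*
 * Illustrative sketch (not part of the original source): callers pick one of
 * the XFS_IFLUSH_* flags to say how urgently the backing buffer should go
 * out; the mapping to delwri/async/sync I/O is decided above.  A periodic
 * sync path, for instance, might use the lazier variants while a quiesce or
 * unmount path might demand synchronous I/O.  The calls below are
 * assumptions about callers, not code from this file.
 *
 *	error = xfs_iflush(ip, XFS_IFLUSH_DELWRI_ELSE_ASYNC);	(periodic sync)
 *	error = xfs_iflush(ip, XFS_IFLUSH_SYNC);		(quiesce/unmount)
 */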
3254 
3255 
3256 STATIC int
3257 xfs_iflush_int(
3258 	xfs_inode_t		*ip,
3259 	xfs_buf_t		*bp)
3260 {
3261 	xfs_inode_log_item_t	*iip;
3262 	xfs_dinode_t		*dip;
3263 	xfs_mount_t		*mp;
3264 #ifdef XFS_TRANS_DEBUG
3265 	int			first;
3266 #endif
3267 	SPLDECL(s);
3268 
3269 	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
3270 	ASSERT(issemalocked(&(ip->i_flock)));
3271 	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
3272 	       ip->i_d.di_nextents > ip->i_df.if_ext_max);
3273 
3274 	iip = ip->i_itemp;
3275 	mp = ip->i_mount;
3276 
3277 
3278 	/*
3279 	 * If the inode isn't dirty, then just release the inode
3280 	 * flush lock and do nothing.
3281 	 */
3282 	if ((ip->i_update_core == 0) &&
3283 	    ((iip == NULL) || !(iip->ili_format.ilf_fields & XFS_ILOG_ALL))) {
3284 		xfs_ifunlock(ip);
3285 		return 0;
3286 	}
3287 
3288 	/* set *dip = inode's place in the buffer */
3289 	dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_boffset);
3290 
3291 	/*
3292 	 * Clear i_update_core before copying out the data.
3293 	 * This is for coordination with our timestamp updates
3294 	 * that don't hold the inode lock. They will always
3295 	 * update the timestamps BEFORE setting i_update_core,
3296 	 * so if we clear i_update_core after they set it we
3297 	 * are guaranteed to see their updates to the timestamps.
3298 	 * I believe that this depends on strongly ordered memory
3299 	 * semantics, but we have that.  We use the SYNCHRONIZE
3300 	 * macro to make sure that the compiler does not reorder
3301 	 * the i_update_core access below the data copy below.
3302 	 */
3303 	ip->i_update_core = 0;
3304 	SYNCHRONIZE();
3305 
3306 	/*
3307 	 * Make sure to get the latest atime from the Linux inode.
3308 	 */
3309 	xfs_synchronize_atime(ip);
3310 
3311 	if (XFS_TEST_ERROR(INT_GET(dip->di_core.di_magic,ARCH_CONVERT) != XFS_DINODE_MAGIC,
3312 			       mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
3313 		xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3314 		    "xfs_iflush: Bad inode %Lu magic number 0x%x, ptr 0x%p",
3315 			ip->i_ino, (int) INT_GET(dip->di_core.di_magic, ARCH_CONVERT), dip);
3316 		goto corrupt_out;
3317 	}
3318 	if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC,
3319 				mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) {
3320 		xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3321 			"xfs_iflush: Bad inode %Lu, ptr 0x%p, magic number 0x%x",
3322 			ip->i_ino, ip, ip->i_d.di_magic);
3323 		goto corrupt_out;
3324 	}
3325 	if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) {
3326 		if (XFS_TEST_ERROR(
3327 		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3328 		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
3329 		    mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) {
3330 			xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3331 				"xfs_iflush: Bad regular inode %Lu, ptr 0x%p",
3332 				ip->i_ino, ip);
3333 			goto corrupt_out;
3334 		}
3335 	} else if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
3336 		if (XFS_TEST_ERROR(
3337 		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3338 		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
3339 		    (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
3340 		    mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) {
3341 			xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3342 				"xfs_iflush: Bad directory inode %Lu, ptr 0x%p",
3343 				ip->i_ino, ip);
3344 			goto corrupt_out;
3345 		}
3346 	}
3347 	if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
3348 				ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5,
3349 				XFS_RANDOM_IFLUSH_5)) {
3350 		xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3351 			"xfs_iflush: detected corrupt incore inode %Lu, total extents = %d, nblocks = %Ld, ptr 0x%p",
3352 			ip->i_ino,
3353 			ip->i_d.di_nextents + ip->i_d.di_anextents,
3354 			ip->i_d.di_nblocks,
3355 			ip);
3356 		goto corrupt_out;
3357 	}
3358 	if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
3359 				mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) {
3360 		xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3361 			"xfs_iflush: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
3362 			ip->i_ino, ip->i_d.di_forkoff, ip);
3363 		goto corrupt_out;
3364 	}
3365 	/*
3366 	 * bump the flush iteration count, used to detect flushes which
3367 	 * postdate a log record during recovery.
3368 	 */
3369 
3370 	ip->i_d.di_flushiter++;
3371 
3372 	/*
3373 	 * Copy the dirty parts of the inode into the on-disk
3374 	 * inode.  We always copy out the core of the inode,
3375 	 * because if the inode is dirty at all the core must
3376 	 * be.
3377 	 */
3378 	xfs_xlate_dinode_core((xfs_caddr_t)&(dip->di_core), &(ip->i_d), -1);
3379 
3380 	/* Wrap; we never let the log put out DI_MAX_FLUSH */
3381 	if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
3382 		ip->i_d.di_flushiter = 0;
3383 
3384 	/*
3385 	 * If this is really an old format inode and the superblock version
3386 	 * has not been updated to support only new format inodes, then
3387 	 * convert back to the old inode format.  If the superblock version
3388 	 * has been updated, then make the conversion permanent.
3389 	 */
3390 	ASSERT(ip->i_d.di_version == XFS_DINODE_VERSION_1 ||
3391 	       XFS_SB_VERSION_HASNLINK(&mp->m_sb));
3392 	if (ip->i_d.di_version == XFS_DINODE_VERSION_1) {
3393 		if (!XFS_SB_VERSION_HASNLINK(&mp->m_sb)) {
3394 			/*
3395 			 * Convert it back.
3396 			 */
3397 			ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1);
3398 			INT_SET(dip->di_core.di_onlink, ARCH_CONVERT, ip->i_d.di_nlink);
3399 		} else {
3400 			/*
3401 			 * The superblock version has already been bumped,
3402 			 * so just make the conversion to the new inode
3403 			 * format permanent.
3404 			 */
3405 			ip->i_d.di_version = XFS_DINODE_VERSION_2;
3406 			INT_SET(dip->di_core.di_version, ARCH_CONVERT, XFS_DINODE_VERSION_2);
3407 			ip->i_d.di_onlink = 0;
3408 			dip->di_core.di_onlink = 0;
3409 			memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
3410 			memset(&(dip->di_core.di_pad[0]), 0,
3411 			      sizeof(dip->di_core.di_pad));
3412 			ASSERT(ip->i_d.di_projid == 0);
3413 		}
3414 	}
3415 
3416 	if (xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp) == EFSCORRUPTED) {
3417 		goto corrupt_out;
3418 	}
3419 
3420 	if (XFS_IFORK_Q(ip)) {
3421 		/*
3422 		 * The only error from xfs_iflush_fork is on the data fork.
3423 		 */
3424 		(void) xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp);
3425 	}
3426 	xfs_inobp_check(mp, bp);
3427 
3428 	/*
3429 	 * We've recorded everything logged in the inode, so we'd
3430 	 * like to clear the ilf_fields bits so we don't log and
3431 	 * flush things unnecessarily.  However, we can't stop
3432 	 * logging all this information until the data we've copied
3433 	 * into the disk buffer is written to disk.  If we did we might
3434 	 * overwrite the copy of the inode in the log with all the
3435 	 * data after re-logging only part of it, and in the face of
3436 	 * a crash we wouldn't have all the data we need to recover.
3437 	 *
3438 	 * What we do is move the bits to the ili_last_fields field.
3439 	 * When logging the inode, these bits are moved back to the
3440 	 * ilf_fields field.  In the xfs_iflush_done() routine we
3441 	 * clear ili_last_fields, since we know that the information
3442 	 * those bits represent is permanently on disk.  As long as
3443 	 * the flush completes before the inode is logged again, then
3444 	 * both ilf_fields and ili_last_fields will be cleared.
3445 	 *
3446 	 * We can play with the ilf_fields bits here, because the inode
3447 	 * lock must be held exclusively in order to set bits there
3448 	 * and the flush lock protects the ili_last_fields bits.
3449 	 * Set ili_logged so the flush done routine can tell
3450 	 * whether or not to look in the AIL.
3451 	 * Also, store the current LSN of the inode so that we can tell
3452 	 * whether the item has moved in the AIL from xfs_iflush_done().
3453 	 * In order to read the lsn we need the AIL lock, because
3454 	 * it is a 64 bit value that cannot be read atomically.
3455 	 */
3456 	if (iip != NULL && iip->ili_format.ilf_fields != 0) {
3457 		iip->ili_last_fields = iip->ili_format.ilf_fields;
3458 		iip->ili_format.ilf_fields = 0;
3459 		iip->ili_logged = 1;
3460 
3461 		ASSERT(sizeof(xfs_lsn_t) == 8);	/* don't lock if it shrinks */
3462 		AIL_LOCK(mp,s);
3463 		iip->ili_flush_lsn = iip->ili_item.li_lsn;
3464 		AIL_UNLOCK(mp, s);
3465 
3466 		/*
3467 		 * Attach the function xfs_iflush_done to the inode's
3468 		 * buffer.  This will remove the inode from the AIL
3469 		 * and unlock the inode's flush lock when the inode is
3470 		 * completely written to disk.
3471 		 */
3472 		xfs_buf_attach_iodone(bp, (void(*)(xfs_buf_t*,xfs_log_item_t*))
3473 				      xfs_iflush_done, (xfs_log_item_t *)iip);
3474 
3475 		ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
3476 		ASSERT(XFS_BUF_IODONE_FUNC(bp) != NULL);
3477 	} else {
3478 		/*
3479 		 * We're flushing an inode which is not in the AIL and has
3480 		 * not been logged but has i_update_core set.  For this
3481 		 * case we can use a B_DELWRI flush and immediately drop
3482 		 * the inode flush lock because we can avoid the whole
3483 		 * AIL state thing.  It's OK to drop the flush lock now,
3484 		 * because we've already locked the buffer and to do anything
3485 		 * you really need both.
3486 		 */
3487 		if (iip != NULL) {
3488 			ASSERT(iip->ili_logged == 0);
3489 			ASSERT(iip->ili_last_fields == 0);
3490 			ASSERT((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0);
3491 		}
3492 		xfs_ifunlock(ip);
3493 	}
3494 
3495 	return 0;
3496 
3497 corrupt_out:
3498 	return XFS_ERROR(EFSCORRUPTED);
3499 }
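
/*
 * Illustrative only: the ili_last_fields handoff above is the heart of
 * the flush/relog protocol.  The standalone userspace sketch below
 * (guarded out of the build, not part of xfs_inode.c; struct demo_item
 * and its field masks are hypothetical stand-ins) shows the same idea:
 * park the dirty-field mask while a flush is in flight, fold it back in
 * if the item is logged again before the flush completes, and drop it
 * in the I/O completion path once the data is safely on disk.
 */
#if 0	/* standalone sketch, not built with xfs_inode.c */
#include <stdio.h>

struct demo_item {
	unsigned int	fields;		/* bits dirtied since last flush */
	unsigned int	last_fields;	/* bits covered by in-flight flush */
};

/* Flush start: hand the dirty bits over to the in-flight flush. */
static void demo_flush_start(struct demo_item *it)
{
	it->last_fields = it->fields;
	it->fields = 0;
}

/* Relogging while a flush is in flight re-dirties the parked bits. */
static void demo_relog(struct demo_item *it, unsigned int newbits)
{
	it->fields |= it->last_fields | newbits;
	it->last_fields = 0;
}

/* Flush completion: the parked bits are now permanently on disk. */
static void demo_flush_done(struct demo_item *it)
{
	it->last_fields = 0;
}

int main(void)
{
	struct demo_item it = { .fields = 0x5, .last_fields = 0 };

	demo_flush_start(&it);	/* fields -> last_fields */
	demo_relog(&it, 0x2);	/* 0x5 | 0x2 must be logged again */
	printf("fields=0x%x last=0x%x\n", it.fields, it.last_fields);
	demo_flush_done(&it);
	return 0;
}
#endif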
3500 
3501 
3502 /*
3503  * Flush all inactive inodes in mp.
3504  */
3505 void
3506 xfs_iflush_all(
3507 	xfs_mount_t	*mp)
3508 {
3509 	xfs_inode_t	*ip;
3510 	bhv_vnode_t	*vp;
3511 
3512  again:
3513 	XFS_MOUNT_ILOCK(mp);
3514 	ip = mp->m_inodes;
3515 	if (ip == NULL)
3516 		goto out;
3517 
3518 	do {
3519 		/* Make sure we skip markers inserted by sync */
3520 		if (ip->i_mount == NULL) {
3521 			ip = ip->i_mnext;
3522 			continue;
3523 		}
3524 
3525 		vp = XFS_ITOV_NULL(ip);
3526 		if (!vp) {
3527 			XFS_MOUNT_IUNLOCK(mp);
3528 			xfs_finish_reclaim(ip, 0, XFS_IFLUSH_ASYNC);
3529 			goto again;
3530 		}
3531 
3532 		ASSERT(vn_count(vp) == 0);
3533 
3534 		ip = ip->i_mnext;
3535 	} while (ip != mp->m_inodes);
3536  out:
3537 	XFS_MOUNT_IUNLOCK(mp);
3538 }
3539 
3540 /*
3541  * xfs_iaccess: check accessibility of inode for mode.
3542  */
3543 int
3544 xfs_iaccess(
3545 	xfs_inode_t	*ip,
3546 	mode_t		mode,
3547 	cred_t		*cr)
3548 {
3549 	int		error;
3550 	mode_t		orgmode = mode;
3551 	struct inode	*inode = vn_to_inode(XFS_ITOV(ip));
3552 
3553 	if (mode & S_IWUSR) {
3554 		umode_t		imode = inode->i_mode;
3555 
3556 		if (IS_RDONLY(inode) &&
3557 		    (S_ISREG(imode) || S_ISDIR(imode) || S_ISLNK(imode)))
3558 			return XFS_ERROR(EROFS);
3559 
3560 		if (IS_IMMUTABLE(inode))
3561 			return XFS_ERROR(EACCES);
3562 	}
3563 
3564 	/*
3565 	 * If there's an Access Control List it's used instead of
3566 	 * the mode bits.
3567 	 */
3568 	if ((error = _ACL_XFS_IACCESS(ip, mode, cr)) != -1)
3569 		return error ? XFS_ERROR(error) : 0;
3570 
3571 	if (current_fsuid(cr) != ip->i_d.di_uid) {
3572 		mode >>= 3;
3573 		if (!in_group_p((gid_t)ip->i_d.di_gid))
3574 			mode >>= 3;
3575 	}
3576 
3577 	/*
3578 	 * If the DACs are ok we don't need any capability check.
3579 	 */
3580 	if ((ip->i_d.di_mode & mode) == mode)
3581 		return 0;
3582 	/*
3583 	 * Read/write DACs are always overridable.
3584 	 * Executable DACs are overridable if at least one exec bit is set.
3585 	 */
3586 	if (!(orgmode & S_IXUSR) ||
3587 	    (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode))
3588 		if (capable_cred(cr, CAP_DAC_OVERRIDE))
3589 			return 0;
3590 
3591 	if ((orgmode == S_IRUSR) ||
3592 	    (S_ISDIR(inode->i_mode) && (!(orgmode & S_IWUSR)))) {
3593 		if (capable_cred(cr, CAP_DAC_READ_SEARCH))
3594 			return 0;
3595 #ifdef	NOISE
3596 		cmn_err(CE_NOTE, "Ick: mode=%o, orgmode=%o", mode, orgmode);
3597 #endif	/* NOISE */
3598 		return XFS_ERROR(EACCES);
3599 	}
3600 	return XFS_ERROR(EACCES);
3601 }
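
/*
 * Illustrative only: the owner/group/other selection in xfs_iaccess()
 * above works by shifting the requested rwx bits into the matching
 * permission triad.  A standalone userspace sketch of that arithmetic
 * (guarded out of the build; demo_check() is a hypothetical helper,
 * not part of xfs_inode.c):
 */
#if 0	/* standalone sketch, not built with xfs_inode.c */
#include <stdio.h>
#include <sys/stat.h>

/* Shift S_I?USR requests into the group/other triad, then test them. */
static int demo_check(mode_t file_mode, mode_t want,
		      int is_owner, int in_group)
{
	if (!is_owner) {
		want >>= 3;		/* owner bits -> group bits */
		if (!in_group)
			want >>= 3;	/* group bits -> other bits */
	}
	return (file_mode & want) == want;	/* 1 = DACs allow it */
}

int main(void)
{
	/* 0750: owner rwx, group r-x, other --- */
	printf("%d\n", demo_check(0750, S_IRUSR | S_IWUSR, 1, 1));	/* 1 */
	printf("%d\n", demo_check(0750, S_IWUSR, 0, 1));		/* 0 */
	printf("%d\n", demo_check(0750, S_IRUSR, 0, 0));		/* 0 */
	return 0;
}
#endif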
3602 
3603 /*
3604  * xfs_iroundup: round up argument to next power of two
3605  */
3606 uint
3607 xfs_iroundup(
3608 	uint	v)
3609 {
3610 	int i;
3611 	uint m;
3612 
3613 	if ((v & (v - 1)) == 0)
3614 		return v;
3615 	ASSERT((v & 0x80000000) == 0);
3616 	if ((v & (v + 1)) == 0)
3617 		return v + 1;
3618 	for (i = 0, m = 1; i < 31; i++, m <<= 1) {
3619 		if (v & m)
3620 			continue;
3621 		v |= m;
3622 		if ((v & (v + 1)) == 0)
3623 			return v + 1;
3624 	}
3625 	ASSERT(0);
3626 	return 0;
3627 }
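
/*
 * Illustrative only: xfs_iroundup() is simply "round v up to the next
 * power of two".  The standalone sketch below (guarded out of the
 * build; demo_roundup_pow2() is a hypothetical helper, not part of
 * xfs_inode.c) computes the same result for a couple of sample values.
 */
#if 0	/* standalone sketch, not built with xfs_inode.c */
#include <stdio.h>

/* Round v up to the next power of two; assumes v > 0 and bit 31 clear. */
static unsigned int demo_roundup_pow2(unsigned int v)
{
	unsigned int r = 1;

	if ((v & (v - 1)) == 0)		/* already a power of two */
		return v;
	while (r < v)
		r <<= 1;
	return r;
}

int main(void)
{
	printf("%u -> %u\n", 1000u, demo_roundup_pow2(1000u));	/* 1024 */
	printf("%u -> %u\n", 4096u, demo_roundup_pow2(4096u));	/* 4096 */
	return 0;
}
#endif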
3628 
3629 #ifdef XFS_ILOCK_TRACE
3630 ktrace_t	*xfs_ilock_trace_buf;
3631 
3632 void
3633 xfs_ilock_trace(xfs_inode_t *ip, int lock, unsigned int lockflags, inst_t *ra)
3634 {
3635 	ktrace_enter(ip->i_lock_trace,
3636 		     (void *)ip,
3637 		     (void *)(unsigned long)lock, /* 1 = LOCK, 3=UNLOCK, etc */
3638 		     (void *)(unsigned long)lockflags, /* XFS_ILOCK_EXCL etc */
3639 		     (void *)ra,		/* caller of ilock */
3640 		     (void *)(unsigned long)current_cpu(),
3641 		     (void *)(unsigned long)current_pid(),
3642 		     NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL);
3643 }
3644 #endif
3645 
3646 /*
3647  * Return a pointer to the extent record at file index idx.
3648  */
3649 xfs_bmbt_rec_t *
3650 xfs_iext_get_ext(
3651 	xfs_ifork_t	*ifp,		/* inode fork pointer */
3652 	xfs_extnum_t	idx)		/* index of target extent */
3653 {
3654 	ASSERT(idx >= 0);
3655 	if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) {
3656 		return ifp->if_u1.if_ext_irec->er_extbuf;
3657 	} else if (ifp->if_flags & XFS_IFEXTIREC) {
3658 		xfs_ext_irec_t	*erp;		/* irec pointer */
3659 		int		erp_idx = 0;	/* irec index */
3660 		xfs_extnum_t	page_idx = idx;	/* ext index in target list */
3661 
3662 		erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
3663 		return &erp->er_extbuf[page_idx];
3664 	} else if (ifp->if_bytes) {
3665 		return &ifp->if_u1.if_extents[idx];
3666 	} else {
3667 		return NULL;
3668 	}
3669 }
3670 
3671 /*
3672  * Insert new item(s) into the extent records for incore inode
3673  * fork 'ifp'.  'count' new items are inserted at index 'idx'.
3674  */
3675 void
3676 xfs_iext_insert(
3677 	xfs_ifork_t	*ifp,		/* inode fork pointer */
3678 	xfs_extnum_t	idx,		/* starting index of new items */
3679 	xfs_extnum_t	count,		/* number of inserted items */
3680 	xfs_bmbt_irec_t	*new)		/* items to insert */
3681 {
3682 	xfs_bmbt_rec_t	*ep;		/* extent record pointer */
3683 	xfs_extnum_t	i;		/* extent record index */
3684 
3685 	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
3686 	xfs_iext_add(ifp, idx, count);
3687 	for (i = idx; i < idx + count; i++, new++) {
3688 		ep = xfs_iext_get_ext(ifp, i);
3689 		xfs_bmbt_set_all(ep, new);
3690 	}
3691 }
3692 
3693 /*
3694  * This is called when the amount of space required for incore file
3695  * extents needs to be increased. The ext_diff parameter stores the
3696  * number of new extents being added and the idx parameter contains
3697  * the extent index where the new extents will be added. If the new
3698  * extents are being appended, then we just need to (re)allocate and
3699  * initialize the space. Otherwise, if the new extents are being
3700  * inserted into the middle of the existing entries, a bit more work
3701  * is required to make room for the new extents to be inserted. The
3702  * caller is responsible for filling in the new extent entries upon
3703  * return.
3704  */
3705 void
3706 xfs_iext_add(
3707 	xfs_ifork_t	*ifp,		/* inode fork pointer */
3708 	xfs_extnum_t	idx,		/* index to begin adding exts */
3709 	int		ext_diff)	/* number of extents to add */
3710 {
3711 	int		byte_diff;	/* new bytes being added */
3712 	int		new_size;	/* size of extents after adding */
3713 	xfs_extnum_t	nextents;	/* number of extents in file */
3714 
3715 	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3716 	ASSERT((idx >= 0) && (idx <= nextents));
3717 	byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t);
3718 	new_size = ifp->if_bytes + byte_diff;
3719 	/*
3720 	 * If the new number of extents (nextents + ext_diff)
3721 	 * fits inside the inode, then continue to use the inline
3722 	 * extent buffer.
3723 	 */
3724 	if (nextents + ext_diff <= XFS_INLINE_EXTS) {
3725 		if (idx < nextents) {
3726 			memmove(&ifp->if_u2.if_inline_ext[idx + ext_diff],
3727 				&ifp->if_u2.if_inline_ext[idx],
3728 				(nextents - idx) * sizeof(xfs_bmbt_rec_t));
3729 			memset(&ifp->if_u2.if_inline_ext[idx], 0, byte_diff);
3730 		}
3731 		ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
3732 		ifp->if_real_bytes = 0;
3733 		ifp->if_lastex = nextents + ext_diff;
3734 	}
3735 	/*
3736 	 * Otherwise use a linear (direct) extent list.
3737 	 * If the extents are currently inside the inode,
3738 	 * xfs_iext_realloc_direct will switch us from
3739 	 * inline to direct extent allocation mode.
3740 	 */
3741 	else if (nextents + ext_diff <= XFS_LINEAR_EXTS) {
3742 		xfs_iext_realloc_direct(ifp, new_size);
3743 		if (idx < nextents) {
3744 			memmove(&ifp->if_u1.if_extents[idx + ext_diff],
3745 				&ifp->if_u1.if_extents[idx],
3746 				(nextents - idx) * sizeof(xfs_bmbt_rec_t));
3747 			memset(&ifp->if_u1.if_extents[idx], 0, byte_diff);
3748 		}
3749 	}
3750 	/* Indirection array */
3751 	else {
3752 		xfs_ext_irec_t	*erp;
3753 		int		erp_idx = 0;
3754 		int		page_idx = idx;
3755 
3756 		ASSERT(nextents + ext_diff > XFS_LINEAR_EXTS);
3757 		if (ifp->if_flags & XFS_IFEXTIREC) {
3758 			erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 1);
3759 		} else {
3760 			xfs_iext_irec_init(ifp);
3761 			ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3762 			erp = ifp->if_u1.if_ext_irec;
3763 		}
3764 		/* Extents fit in target extent page */
3765 		if (erp && erp->er_extcount + ext_diff <= XFS_LINEAR_EXTS) {
3766 			if (page_idx < erp->er_extcount) {
3767 				memmove(&erp->er_extbuf[page_idx + ext_diff],
3768 					&erp->er_extbuf[page_idx],
3769 					(erp->er_extcount - page_idx) *
3770 					sizeof(xfs_bmbt_rec_t));
3771 				memset(&erp->er_extbuf[page_idx], 0, byte_diff);
3772 			}
3773 			erp->er_extcount += ext_diff;
3774 			xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3775 		}
3776 		/* Insert a new extent page */
3777 		else if (erp) {
3778 			xfs_iext_add_indirect_multi(ifp,
3779 				erp_idx, page_idx, ext_diff);
3780 		}
3781 		/*
3782 		 * If extent(s) are being appended to the last page in
3783 		 * the indirection array and the new extent(s) don't fit
3784 		 * in the page, then erp is NULL and erp_idx is set to
3785 		 * the next index needed in the indirection array.
3786 		 */
3787 		else {
3788 			int	count = ext_diff;
3789 
3790 			while (count) {
3791 				erp = xfs_iext_irec_new(ifp, erp_idx);
3792 				erp->er_extcount = count;
3793 				count -= MIN(count, (int)XFS_LINEAR_EXTS);
3794 				if (count) {
3795 					erp_idx++;
3796 				}
3797 			}
3798 		}
3799 	}
3800 	ifp->if_bytes = new_size;
3801 }
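
/*
 * Illustrative only: all three storage formats handled above make room
 * the same way, by sliding the tail of the array up ext_diff slots with
 * memmove() and zeroing the gap for the caller to fill in.  A standalone
 * sketch of that pattern on a plain int array (guarded out of the build;
 * demo_make_room() is hypothetical, not part of xfs_inode.c):
 */
#if 0	/* standalone sketch, not built with xfs_inode.c */
#include <stdio.h>
#include <string.h>

#define DEMO_CAP	8

/* Open a hole of 'count' slots at 'idx' in an array holding 'nused'. */
static void demo_make_room(int *arr, int nused, int idx, int count)
{
	if (idx < nused)
		memmove(&arr[idx + count], &arr[idx],
			(nused - idx) * sizeof(arr[0]));
	memset(&arr[idx], 0, count * sizeof(arr[0]));
}

int main(void)
{
	int a[DEMO_CAP] = { 10, 20, 30, 40 };
	int i;

	demo_make_room(a, 4, 1, 2);	/* 10, 0, 0, 20, 30, 40 */
	for (i = 0; i < 6; i++)
		printf("%d ", a[i]);
	printf("\n");
	return 0;
}
#endif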
3802 
3803 /*
3804  * This is called when incore extents are being added to the indirection
3805  * array and the new extents do not fit in the target extent list. The
3806  * erp_idx parameter contains the irec index for the target extent list
3807  * in the indirection array, and the idx parameter contains the extent
3808  * index within the list. The number of extents being added is stored
3809  * in the count parameter.
3810  *
3811  *    |-------|   |-------|
3812  *    |       |   |       |    idx - number of extents before idx
3813  *    |  idx  |   | count |
3814  *    |       |   |       |    count - number of extents being inserted at idx
3815  *    |-------|   |-------|
3816  *    | count |   | nex2  |    nex2 - number of extents after idx + count
3817  *    |-------|   |-------|
3818  */
3819 void
3820 xfs_iext_add_indirect_multi(
3821 	xfs_ifork_t	*ifp,			/* inode fork pointer */
3822 	int		erp_idx,		/* target extent irec index */
3823 	xfs_extnum_t	idx,			/* index within target list */
3824 	int		count)			/* new extents being added */
3825 {
3826 	int		byte_diff;		/* new bytes being added */
3827 	xfs_ext_irec_t	*erp;			/* pointer to irec entry */
3828 	xfs_extnum_t	ext_diff;		/* number of extents to add */
3829 	xfs_extnum_t	ext_cnt;		/* new extents still needed */
3830 	xfs_extnum_t	nex2;			/* extents after idx + count */
3831 	xfs_bmbt_rec_t	*nex2_ep = NULL;	/* temp list for nex2 extents */
3832 	int		nlists;			/* number of irec's (lists) */
3833 
3834 	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3835 	erp = &ifp->if_u1.if_ext_irec[erp_idx];
3836 	nex2 = erp->er_extcount - idx;
3837 	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3838 
3839 	/*
3840 	 * Save second part of target extent list
3841 	 * (all extents past idx). */
3842 	if (nex2) {
3843 		byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
3844 		nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_SLEEP);
3845 		memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff);
3846 		erp->er_extcount -= nex2;
3847 		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2);
3848 		memset(&erp->er_extbuf[idx], 0, byte_diff);
3849 	}
3850 
3851 	/*
3852 	 * Add the new extents to the end of the target
3853 	 * list, then allocate new irec record(s) and
3854 	 * extent buffer(s) as needed to store the rest
3855 	 * of the new extents.
3856 	 */
3857 	ext_cnt = count;
3858 	ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS - erp->er_extcount);
3859 	if (ext_diff) {
3860 		erp->er_extcount += ext_diff;
3861 		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3862 		ext_cnt -= ext_diff;
3863 	}
3864 	while (ext_cnt) {
3865 		erp_idx++;
3866 		erp = xfs_iext_irec_new(ifp, erp_idx);
3867 		ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS);
3868 		erp->er_extcount = ext_diff;
3869 		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3870 		ext_cnt -= ext_diff;
3871 	}
3872 
3873 	/* Add nex2 extents back to indirection array */
3874 	if (nex2) {
3875 		xfs_extnum_t	ext_avail;
3876 		int		i;
3877 
3878 		byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
3879 		ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
3880 		i = 0;
3881 		/*
3882 		 * If nex2 extents fit in the current page, append
3883 		 * nex2_ep after the new extents.
3884 		 */
3885 		if (nex2 <= ext_avail) {
3886 			i = erp->er_extcount;
3887 		}
3888 		/*
3889 		 * Otherwise, check if space is available in the
3890 		 * next page.
3891 		 */
3892 		else if ((erp_idx < nlists - 1) &&
3893 			 (nex2 <= (ext_avail = XFS_LINEAR_EXTS -
3894 			  ifp->if_u1.if_ext_irec[erp_idx+1].er_extcount))) {
3895 			erp_idx++;
3896 			erp++;
3897 			/* Create a hole for nex2 extents */
3898 			memmove(&erp->er_extbuf[nex2], erp->er_extbuf,
3899 				erp->er_extcount * sizeof(xfs_bmbt_rec_t));
3900 		}
3901 		/*
3902 		 * Final choice, create a new extent page for
3903 		 * nex2 extents.
3904 		 */
3905 		else {
3906 			erp_idx++;
3907 			erp = xfs_iext_irec_new(ifp, erp_idx);
3908 		}
3909 		memmove(&erp->er_extbuf[i], nex2_ep, byte_diff);
3910 		kmem_free(nex2_ep, byte_diff);
3911 		erp->er_extcount += nex2;
3912 		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, nex2);
3913 	}
3914 }
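
/*
 * Illustrative only: the loops above carve the incoming extents into
 * page-sized chunks, filling the free space in the current page first
 * and then whole new pages of at most XFS_LINEAR_EXTS records.  A
 * standalone sketch of just that arithmetic (guarded out of the build;
 * DEMO_PAGE_EXTS and demo_distribute() are hypothetical, not part of
 * xfs_inode.c):
 */
#if 0	/* standalone sketch, not built with xfs_inode.c */
#include <stdio.h>

#define DEMO_PAGE_EXTS	256	/* stand-in for XFS_LINEAR_EXTS */

static int demo_min(int a, int b) { return a < b ? a : b; }

/*
 * Show how 'count' new records spread over pages, given 'used' records
 * already in the current page; mirrors the ext_cnt/ext_diff loop above.
 */
static void demo_distribute(int used, int count)
{
	int chunk = demo_min(count, DEMO_PAGE_EXTS - used);

	printf("current page takes %d\n", chunk);
	count -= chunk;
	while (count) {
		chunk = demo_min(count, DEMO_PAGE_EXTS);
		printf("new page takes %d\n", chunk);
		count -= chunk;
	}
}

int main(void)
{
	demo_distribute(200, 400);	/* 56, then 256, then 88 */
	return 0;
}
#endif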
3915 
3916 /*
3917  * This is called when the amount of space required for incore file
3918  * extents needs to be decreased. The ext_diff parameter stores the
3919  * number of extents to be removed and the idx parameter contains
3920  * the extent index where the extents will be removed from.
3921  *
3922  * If the amount of space needed has decreased below the linear
3923  * limit, XFS_IEXT_BUFSZ, then switch to using the contiguous
3924  * extent array.  Otherwise, use kmem_realloc() to adjust the
3925  * size to what is needed.
3926  */
3927 void
3928 xfs_iext_remove(
3929 	xfs_ifork_t	*ifp,		/* inode fork pointer */
3930 	xfs_extnum_t	idx,		/* index to begin removing exts */
3931 	int		ext_diff)	/* number of extents to remove */
3932 {
3933 	xfs_extnum_t	nextents;	/* number of extents in file */
3934 	int		new_size;	/* size of extents after removal */
3935 
3936 	ASSERT(ext_diff > 0);
3937 	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3938 	new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t);
3939 
3940 	if (new_size == 0) {
3941 		xfs_iext_destroy(ifp);
3942 	} else if (ifp->if_flags & XFS_IFEXTIREC) {
3943 		xfs_iext_remove_indirect(ifp, idx, ext_diff);
3944 	} else if (ifp->if_real_bytes) {
3945 		xfs_iext_remove_direct(ifp, idx, ext_diff);
3946 	} else {
3947 		xfs_iext_remove_inline(ifp, idx, ext_diff);
3948 	}
3949 	ifp->if_bytes = new_size;
3950 }
3951 
3952 /*
3953  * This removes ext_diff extents from the inline buffer, beginning
3954  * at extent index idx.
3955  */
3956 void
3957 xfs_iext_remove_inline(
3958 	xfs_ifork_t	*ifp,		/* inode fork pointer */
3959 	xfs_extnum_t	idx,		/* index to begin removing exts */
3960 	int		ext_diff)	/* number of extents to remove */
3961 {
3962 	int		nextents;	/* number of extents in file */
3963 
3964 	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
3965 	ASSERT(idx < XFS_INLINE_EXTS);
3966 	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3967 	ASSERT(((nextents - ext_diff) > 0) &&
3968 		(nextents - ext_diff) < XFS_INLINE_EXTS);
3969 
3970 	if (idx + ext_diff < nextents) {
3971 		memmove(&ifp->if_u2.if_inline_ext[idx],
3972 			&ifp->if_u2.if_inline_ext[idx + ext_diff],
3973 			(nextents - (idx + ext_diff)) *
3974 			 sizeof(xfs_bmbt_rec_t));
3975 		memset(&ifp->if_u2.if_inline_ext[nextents - ext_diff],
3976 			0, ext_diff * sizeof(xfs_bmbt_rec_t));
3977 	} else {
3978 		memset(&ifp->if_u2.if_inline_ext[idx], 0,
3979 			ext_diff * sizeof(xfs_bmbt_rec_t));
3980 	}
3981 }
3982 
3983 /*
3984  * This removes ext_diff extents from a linear (direct) extent list,
3985  * beginning at extent index idx. If the extents are being removed
3986  * from the end of the list (ie. truncate) then we just need to re-
3987  * from the end of the list (i.e. truncate) then we just need to re-
3988  * extents are being removed from the middle of the existing extent
3989  * entries, then we first need to move the extent records beginning
3990  * at idx + ext_diff up in the list to overwrite the records being
3991  * removed, then remove the extra space via kmem_realloc.
3992  */
3993 void
3994 xfs_iext_remove_direct(
3995 	xfs_ifork_t	*ifp,		/* inode fork pointer */
3996 	xfs_extnum_t	idx,		/* index to begin removing exts */
3997 	int		ext_diff)	/* number of extents to remove */
3998 {
3999 	xfs_extnum_t	nextents;	/* number of extents in file */
4000 	int		new_size;	/* size of extents after removal */
4001 
4002 	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
4003 	new_size = ifp->if_bytes -
4004 		(ext_diff * sizeof(xfs_bmbt_rec_t));
4005 	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4006 
4007 	if (new_size == 0) {
4008 		xfs_iext_destroy(ifp);
4009 		return;
4010 	}
4011 	/* Move extents up in the list (if needed) */
4012 	if (idx + ext_diff < nextents) {
4013 		memmove(&ifp->if_u1.if_extents[idx],
4014 			&ifp->if_u1.if_extents[idx + ext_diff],
4015 			(nextents - (idx + ext_diff)) *
4016 			 sizeof(xfs_bmbt_rec_t));
4017 	}
4018 	memset(&ifp->if_u1.if_extents[nextents - ext_diff],
4019 		0, ext_diff * sizeof(xfs_bmbt_rec_t));
4020 	/*
4021 	 * Reallocate the direct extent list. If the extents
4022 	 * will fit inside the inode then xfs_iext_realloc_direct
4023 	 * will switch from direct to inline extent allocation
4024 	 * mode for us.
4025 	 */
4026 	xfs_iext_realloc_direct(ifp, new_size);
4027 	ifp->if_bytes = new_size;
4028 }
4029 
4030 /*
4031  * This is called when incore extents are being removed from the
4032  * indirection array and the extents being removed span multiple extent
4033  * buffers. The idx parameter contains the file extent index where we
4034  * want to begin removing extents, and the count parameter contains
4035  * how many extents need to be removed.
4036  *
4037  *    |-------|   |-------|
4038  *    | nex1  |   |       |    nex1 - number of extents before idx
4039  *    |-------|   | count |
4040  *    |       |   |       |    count - number of extents being removed at idx
4041  *    | count |   |-------|
4042  *    |       |   | nex2  |    nex2 - number of extents after idx + count
4043  *    |-------|   |-------|
4044  */
4045 void
4046 xfs_iext_remove_indirect(
4047 	xfs_ifork_t	*ifp,		/* inode fork pointer */
4048 	xfs_extnum_t	idx,		/* index to begin removing extents */
4049 	int		count)		/* number of extents to remove */
4050 {
4051 	xfs_ext_irec_t	*erp;		/* indirection array pointer */
4052 	int		erp_idx = 0;	/* indirection array index */
4053 	xfs_extnum_t	ext_cnt;	/* extents left to remove */
4054 	xfs_extnum_t	ext_diff;	/* extents to remove in current list */
4055 	xfs_extnum_t	nex1;		/* number of extents before idx */
4056 	xfs_extnum_t	nex2;		/* extents after idx + count */
4057 	int		nlists;		/* entries in indirection array */
4058 	int		page_idx = idx;	/* index in target extent list */
4059 
4060 	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4061 	erp = xfs_iext_idx_to_irec(ifp,  &page_idx, &erp_idx, 0);
4062 	ASSERT(erp != NULL);
4063 	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4064 	nex1 = page_idx;
4065 	ext_cnt = count;
4066 	while (ext_cnt) {
4067 		nex2 = MAX((erp->er_extcount - (nex1 + ext_cnt)), 0);
4068 		ext_diff = MIN(ext_cnt, (erp->er_extcount - nex1));
4069 		/*
4070 		 * Check for deletion of entire list;
4071 		 * xfs_iext_irec_remove() updates extent offsets.
4072 		 */
4073 		if (ext_diff == erp->er_extcount) {
4074 			xfs_iext_irec_remove(ifp, erp_idx);
4075 			ext_cnt -= ext_diff;
4076 			nex1 = 0;
4077 			if (ext_cnt) {
4078 				ASSERT(erp_idx < ifp->if_real_bytes /
4079 					XFS_IEXT_BUFSZ);
4080 				erp = &ifp->if_u1.if_ext_irec[erp_idx];
4081 				nex1 = 0;
4082 				continue;
4083 			} else {
4084 				break;
4085 			}
4086 		}
4087 		/* Move extents up (if needed) */
4088 		if (nex2) {
4089 			memmove(&erp->er_extbuf[nex1],
4090 				&erp->er_extbuf[nex1 + ext_diff],
4091 				nex2 * sizeof(xfs_bmbt_rec_t));
4092 		}
4093 		/* Zero out rest of page */
4094 		memset(&erp->er_extbuf[nex1 + nex2], 0, (XFS_IEXT_BUFSZ -
4095 			((nex1 + nex2) * sizeof(xfs_bmbt_rec_t))));
4096 		/* Update remaining counters */
4097 		erp->er_extcount -= ext_diff;
4098 		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -ext_diff);
4099 		ext_cnt -= ext_diff;
4100 		nex1 = 0;
4101 		erp_idx++;
4102 		erp++;
4103 	}
4104 	ifp->if_bytes -= count * sizeof(xfs_bmbt_rec_t);
4105 	xfs_iext_irec_compact(ifp);
4106 }
4107 
4108 /*
4109  * Create, destroy, or resize a linear (direct) block of extents.
4110  */
4111 void
4112 xfs_iext_realloc_direct(
4113 	xfs_ifork_t	*ifp,		/* inode fork pointer */
4114 	int		new_size)	/* new size of extents */
4115 {
4116 	int		rnew_size;	/* real new size of extents */
4117 
4118 	rnew_size = new_size;
4119 
4120 	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC) ||
4121 		((new_size >= 0) && (new_size <= XFS_IEXT_BUFSZ) &&
4122 		 (new_size != ifp->if_real_bytes)));
4123 
4124 	/* Free extent records */
4125 	if (new_size == 0) {
4126 		xfs_iext_destroy(ifp);
4127 	}
4128 	/* Resize direct extent list and zero any new bytes */
4129 	else if (ifp->if_real_bytes) {
4130 		/* Check if extents will fit inside the inode */
4131 		if (new_size <= XFS_INLINE_EXTS * sizeof(xfs_bmbt_rec_t)) {
4132 			xfs_iext_direct_to_inline(ifp, new_size /
4133 				(uint)sizeof(xfs_bmbt_rec_t));
4134 			ifp->if_bytes = new_size;
4135 			return;
4136 		}
4137 		if ((new_size & (new_size - 1)) != 0) {
4138 			rnew_size = xfs_iroundup(new_size);
4139 		}
4140 		if (rnew_size != ifp->if_real_bytes) {
4141 			ifp->if_u1.if_extents = (xfs_bmbt_rec_t *)
4142 				kmem_realloc(ifp->if_u1.if_extents,
4143 						rnew_size,
4144 						ifp->if_real_bytes,
4145 						KM_SLEEP);
4146 		}
4147 		if (rnew_size > ifp->if_real_bytes) {
4148 			memset(&ifp->if_u1.if_extents[ifp->if_bytes /
4149 				(uint)sizeof(xfs_bmbt_rec_t)], 0,
4150 				rnew_size - ifp->if_real_bytes);
4151 		}
4152 	}
4153 	/*
4154 	 * Switch from the inline extent buffer to a direct
4155 	 * extent list. Be sure to include the inline extent
4156 	 * bytes in new_size.
4157 	 */
4158 	else {
4159 		new_size += ifp->if_bytes;
4160 		if ((new_size & (new_size - 1)) != 0) {
4161 			rnew_size = xfs_iroundup(new_size);
4162 		}
4163 		xfs_iext_inline_to_direct(ifp, rnew_size);
4164 	}
4165 	ifp->if_real_bytes = rnew_size;
4166 	ifp->if_bytes = new_size;
4167 }
4168 
4169 /*
4170  * Switch from linear (direct) extent records to inline buffer.
4171  */
4172 void
4173 xfs_iext_direct_to_inline(
4174 	xfs_ifork_t	*ifp,		/* inode fork pointer */
4175 	xfs_extnum_t	nextents)	/* number of extents in file */
4176 {
4177 	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
4178 	ASSERT(nextents <= XFS_INLINE_EXTS);
4179 	/*
4180 	 * The inline buffer was zeroed when we switched
4181 	 * from inline to direct extent allocation mode,
4182 	 * so we don't need to clear it here.
4183 	 */
4184 	memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents,
4185 		nextents * sizeof(xfs_bmbt_rec_t));
4186 	kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes);
4187 	ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
4188 	ifp->if_real_bytes = 0;
4189 }
4190 
4191 /*
4192  * Switch from inline buffer to linear (direct) extent records.
4193  * new_size should already be rounded up to the next power of 2
4194  * by the caller (when appropriate), so use new_size as it is.
4195  * However, since new_size may be rounded up, we can't update
4196  * if_bytes here. It is the caller's responsibility to update
4197  * if_bytes upon return.
4198  */
4199 void
4200 xfs_iext_inline_to_direct(
4201 	xfs_ifork_t	*ifp,		/* inode fork pointer */
4202 	int		new_size)	/* number of extents in file */
4203 {
4204 	ifp->if_u1.if_extents = (xfs_bmbt_rec_t *)
4205 		kmem_alloc(new_size, KM_SLEEP);
4206 	memset(ifp->if_u1.if_extents, 0, new_size);
4207 	if (ifp->if_bytes) {
4208 		memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext,
4209 			ifp->if_bytes);
4210 		memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
4211 			sizeof(xfs_bmbt_rec_t));
4212 	}
4213 	ifp->if_real_bytes = new_size;
4214 }
4215 
4216 /*
4217  * Resize an extent indirection array to new_size bytes.
4218  */
4219 void
4220 xfs_iext_realloc_indirect(
4221 	xfs_ifork_t	*ifp,		/* inode fork pointer */
4222 	int		new_size)	/* new indirection array size */
4223 {
4224 	int		nlists;		/* number of irec's (ex lists) */
4225 	int		size;		/* current indirection array size */
4226 
4227 	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4228 	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4229 	size = nlists * sizeof(xfs_ext_irec_t);
4230 	ASSERT(ifp->if_real_bytes);
4231 	ASSERT((new_size >= 0) && (new_size != size));
4232 	if (new_size == 0) {
4233 		xfs_iext_destroy(ifp);
4234 	} else {
4235 		ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *)
4236 			kmem_realloc(ifp->if_u1.if_ext_irec,
4237 				new_size, size, KM_SLEEP);
4238 	}
4239 }
4240 
4241 /*
4242  * Switch from indirection array to linear (direct) extent allocations.
4243  */
4244 void
4245 xfs_iext_indirect_to_direct(
4246 	 xfs_ifork_t	*ifp)		/* inode fork pointer */
4247 {
4248 	xfs_bmbt_rec_t	*ep;		/* extent record pointer */
4249 	xfs_extnum_t	nextents;	/* number of extents in file */
4250 	int		size;		/* size of file extents */
4251 
4252 	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4253 	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4254 	ASSERT(nextents <= XFS_LINEAR_EXTS);
4255 	size = nextents * sizeof(xfs_bmbt_rec_t);
4256 
4257 	xfs_iext_irec_compact_full(ifp);
4258 	ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ);
4259 
4260 	ep = ifp->if_u1.if_ext_irec->er_extbuf;
4261 	kmem_free(ifp->if_u1.if_ext_irec, sizeof(xfs_ext_irec_t));
4262 	ifp->if_flags &= ~XFS_IFEXTIREC;
4263 	ifp->if_u1.if_extents = ep;
4264 	ifp->if_bytes = size;
4265 	if (nextents < XFS_LINEAR_EXTS) {
4266 		xfs_iext_realloc_direct(ifp, size);
4267 	}
4268 }
4269 
4270 /*
4271  * Free incore file extents.
4272  */
4273 void
4274 xfs_iext_destroy(
4275 	xfs_ifork_t	*ifp)		/* inode fork pointer */
4276 {
4277 	if (ifp->if_flags & XFS_IFEXTIREC) {
4278 		int	erp_idx;
4279 		int	nlists;
4280 
4281 		nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4282 		for (erp_idx = nlists - 1; erp_idx >= 0 ; erp_idx--) {
4283 			xfs_iext_irec_remove(ifp, erp_idx);
4284 		}
4285 		ifp->if_flags &= ~XFS_IFEXTIREC;
4286 	} else if (ifp->if_real_bytes) {
4287 		kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes);
4288 	} else if (ifp->if_bytes) {
4289 		memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
4290 			sizeof(xfs_bmbt_rec_t));
4291 	}
4292 	ifp->if_u1.if_extents = NULL;
4293 	ifp->if_real_bytes = 0;
4294 	ifp->if_bytes = 0;
4295 }
4296 
4297 /*
4298  * Return a pointer to the extent record for file system block bno.
4299  */
4300 xfs_bmbt_rec_t *			/* pointer to found extent record */
4301 xfs_iext_bno_to_ext(
4302 	xfs_ifork_t	*ifp,		/* inode fork pointer */
4303 	xfs_fileoff_t	bno,		/* block number to search for */
4304 	xfs_extnum_t	*idxp)		/* index of target extent */
4305 {
4306 	xfs_bmbt_rec_t	*base;		/* pointer to first extent */
4307 	xfs_filblks_t	blockcount = 0;	/* number of blocks in extent */
4308 	xfs_bmbt_rec_t	*ep = NULL;	/* pointer to target extent */
4309 	xfs_ext_irec_t	*erp = NULL;	/* indirection array pointer */
4310 	int		high;		/* upper boundary in search */
4311 	xfs_extnum_t	idx = 0;	/* index of target extent */
4312 	int		low;		/* lower boundary in search */
4313 	xfs_extnum_t	nextents;	/* number of file extents */
4314 	xfs_fileoff_t	startoff = 0;	/* start offset of extent */
4315 
4316 	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4317 	if (nextents == 0) {
4318 		*idxp = 0;
4319 		return NULL;
4320 	}
4321 	low = 0;
4322 	if (ifp->if_flags & XFS_IFEXTIREC) {
4323 		/* Find target extent list */
4324 		int	erp_idx = 0;
4325 		erp = xfs_iext_bno_to_irec(ifp, bno, &erp_idx);
4326 		base = erp->er_extbuf;
4327 		high = erp->er_extcount - 1;
4328 	} else {
4329 		base = ifp->if_u1.if_extents;
4330 		high = nextents - 1;
4331 	}
4332 	/* Binary search extent records */
4333 	while (low <= high) {
4334 		idx = (low + high) >> 1;
4335 		ep = base + idx;
4336 		startoff = xfs_bmbt_get_startoff(ep);
4337 		blockcount = xfs_bmbt_get_blockcount(ep);
4338 		if (bno < startoff) {
4339 			high = idx - 1;
4340 		} else if (bno >= startoff + blockcount) {
4341 			low = idx + 1;
4342 		} else {
4343 			/* Convert back to file-based extent index */
4344 			if (ifp->if_flags & XFS_IFEXTIREC) {
4345 				idx += erp->er_extoff;
4346 			}
4347 			*idxp = idx;
4348 			return ep;
4349 		}
4350 	}
4351 	/* Convert back to file-based extent index */
4352 	if (ifp->if_flags & XFS_IFEXTIREC) {
4353 		idx += erp->er_extoff;
4354 	}
4355 	if (bno >= startoff + blockcount) {
4356 		if (++idx == nextents) {
4357 			ep = NULL;
4358 		} else {
4359 			ep = xfs_iext_get_ext(ifp, idx);
4360 		}
4361 	}
4362 	*idxp = idx;
4363 	return ep;
4364 }
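
/*
 * Illustrative only: the lookup above is a plain binary search over
 * records sorted by starting offset, where a hit means
 * startoff <= bno < startoff + blockcount.  A standalone sketch on a
 * simplified record type (guarded out of the build; struct demo_ext and
 * demo_bno_to_ext() are hypothetical, not part of xfs_inode.c):
 */
#if 0	/* standalone sketch, not built with xfs_inode.c */
#include <stdio.h>

struct demo_ext {
	unsigned long	startoff;	/* first block covered */
	unsigned long	blockcount;	/* number of blocks covered */
};

/* Return the index of the extent containing bno, or -1 if none does. */
static int demo_bno_to_ext(const struct demo_ext *ext, int nextents,
			   unsigned long bno)
{
	int low = 0, high = nextents - 1;

	while (low <= high) {
		int mid = (low + high) / 2;

		if (bno < ext[mid].startoff)
			high = mid - 1;
		else if (bno >= ext[mid].startoff + ext[mid].blockcount)
			low = mid + 1;
		else
			return mid;
	}
	return -1;
}

int main(void)
{
	struct demo_ext map[] = { { 0, 10 }, { 20, 5 }, { 100, 50 } };

	printf("%d\n", demo_bno_to_ext(map, 3, 22));	/* 1 */
	printf("%d\n", demo_bno_to_ext(map, 3, 15));	/* -1 (hole) */
	return 0;
}
#endif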
4365 
4366 /*
4367  * Return a pointer to the indirection array entry containing the
4368  * extent record for filesystem block bno. Store the index of the
4369  * target irec in *erp_idxp.
4370  */
4371 xfs_ext_irec_t *			/* pointer to found extent record */
4372 xfs_iext_bno_to_irec(
4373 	xfs_ifork_t	*ifp,		/* inode fork pointer */
4374 	xfs_fileoff_t	bno,		/* block number to search for */
4375 	int		*erp_idxp)	/* irec index of target ext list */
4376 {
4377 	xfs_ext_irec_t	*erp = NULL;	/* indirection array pointer */
4378 	xfs_ext_irec_t	*erp_next;	/* next indirection array entry */
4379 	int		erp_idx;	/* indirection array index */
4380 	int		nlists;		/* number of extent irec's (lists) */
4381 	int		high;		/* binary search upper limit */
4382 	int		low;		/* binary search lower limit */
4383 
4384 	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4385 	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4386 	erp_idx = 0;
4387 	low = 0;
4388 	high = nlists - 1;
4389 	while (low <= high) {
4390 		erp_idx = (low + high) >> 1;
4391 		erp = &ifp->if_u1.if_ext_irec[erp_idx];
4392 		erp_next = erp_idx < nlists - 1 ? erp + 1 : NULL;
4393 		if (bno < xfs_bmbt_get_startoff(erp->er_extbuf)) {
4394 			high = erp_idx - 1;
4395 		} else if (erp_next && bno >=
4396 			   xfs_bmbt_get_startoff(erp_next->er_extbuf)) {
4397 			low = erp_idx + 1;
4398 		} else {
4399 			break;
4400 		}
4401 	}
4402 	*erp_idxp = erp_idx;
4403 	return erp;
4404 }
4405 
4406 /*
4407  * Return a pointer to the indirection array entry containing the
4408  * extent record at file extent index *idxp. Store the index of the
4409  * target irec in *erp_idxp and store the page index of the target
4410  * extent record in *idxp.
4411  */
4412 xfs_ext_irec_t *
4413 xfs_iext_idx_to_irec(
4414 	xfs_ifork_t	*ifp,		/* inode fork pointer */
4415 	xfs_extnum_t	*idxp,		/* extent index (file -> page) */
4416 	int		*erp_idxp,	/* pointer to target irec */
4417 	int		realloc)	/* new bytes were just added */
4418 {
4419 	xfs_ext_irec_t	*prev;		/* pointer to previous irec */
4420 	xfs_ext_irec_t	*erp = NULL;	/* pointer to current irec */
4421 	int		erp_idx;	/* indirection array index */
4422 	int		nlists;		/* number of irec's (ex lists) */
4423 	int		high;		/* binary search upper limit */
4424 	int		low;		/* binary search lower limit */
4425 	xfs_extnum_t	page_idx = *idxp; /* extent index in target list */
4426 
4427 	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4428 	ASSERT(page_idx >= 0 && page_idx <=
4429 		ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
4430 	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4431 	erp_idx = 0;
4432 	low = 0;
4433 	high = nlists - 1;
4434 
4435 	/* Binary search extent irec's */
4436 	while (low <= high) {
4437 		erp_idx = (low + high) >> 1;
4438 		erp = &ifp->if_u1.if_ext_irec[erp_idx];
4439 		prev = erp_idx > 0 ? erp - 1 : NULL;
4440 		if (page_idx < erp->er_extoff || (page_idx == erp->er_extoff &&
4441 		     realloc && prev && prev->er_extcount < XFS_LINEAR_EXTS)) {
4442 			high = erp_idx - 1;
4443 		} else if (page_idx > erp->er_extoff + erp->er_extcount ||
4444 			   (page_idx == erp->er_extoff + erp->er_extcount &&
4445 			    !realloc)) {
4446 			low = erp_idx + 1;
4447 		} else if (page_idx == erp->er_extoff + erp->er_extcount &&
4448 			   erp->er_extcount == XFS_LINEAR_EXTS) {
4449 			ASSERT(realloc);
4450 			page_idx = 0;
4451 			erp_idx++;
4452 			erp = erp_idx < nlists ? erp + 1 : NULL;
4453 			break;
4454 		} else {
4455 			page_idx -= erp->er_extoff;
4456 			break;
4457 		}
4458 	}
4459 	*idxp = page_idx;
4460 	*erp_idxp = erp_idx;
4461 	return erp;
4462 }
4463 
4464 /*
4465  * Allocate and initialize an indirection array once the space needed
4466  * for incore extents increases above XFS_IEXT_BUFSZ.
4467  */
4468 void
4469 xfs_iext_irec_init(
4470 	xfs_ifork_t	*ifp)		/* inode fork pointer */
4471 {
4472 	xfs_ext_irec_t	*erp;		/* indirection array pointer */
4473 	xfs_extnum_t	nextents;	/* number of extents in file */
4474 
4475 	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
4476 	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4477 	ASSERT(nextents <= XFS_LINEAR_EXTS);
4478 
4479 	erp = (xfs_ext_irec_t *)
4480 		kmem_alloc(sizeof(xfs_ext_irec_t), KM_SLEEP);
4481 
4482 	if (nextents == 0) {
4483 		ifp->if_u1.if_extents = (xfs_bmbt_rec_t *)
4484 			kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP);
4485 	} else if (!ifp->if_real_bytes) {
4486 		xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ);
4487 	} else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) {
4488 		xfs_iext_realloc_direct(ifp, XFS_IEXT_BUFSZ);
4489 	}
4490 	erp->er_extbuf = ifp->if_u1.if_extents;
4491 	erp->er_extcount = nextents;
4492 	erp->er_extoff = 0;
4493 
4494 	ifp->if_flags |= XFS_IFEXTIREC;
4495 	ifp->if_real_bytes = XFS_IEXT_BUFSZ;
4496 	ifp->if_bytes = nextents * sizeof(xfs_bmbt_rec_t);
4497 	ifp->if_u1.if_ext_irec = erp;
4500 }
4501 
4502 /*
4503  * Allocate and initialize a new entry in the indirection array.
4504  */
4505 xfs_ext_irec_t *
4506 xfs_iext_irec_new(
4507 	xfs_ifork_t	*ifp,		/* inode fork pointer */
4508 	int		erp_idx)	/* index for new irec */
4509 {
4510 	xfs_ext_irec_t	*erp;		/* indirection array pointer */
4511 	int		i;		/* loop counter */
4512 	int		nlists;		/* number of irec's (ex lists) */
4513 
4514 	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4515 	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4516 
4517 	/* Resize indirection array */
4518 	xfs_iext_realloc_indirect(ifp, ++nlists *
4519 				  sizeof(xfs_ext_irec_t));
4520 	/*
4521 	 * Move records down in the array so the
4522 	 * new page can use erp_idx.
4523 	 */
4524 	erp = ifp->if_u1.if_ext_irec;
4525 	for (i = nlists - 1; i > erp_idx; i--) {
4526 		memmove(&erp[i], &erp[i-1], sizeof(xfs_ext_irec_t));
4527 	}
4528 	ASSERT(i == erp_idx);
4529 
4530 	/* Initialize new extent record */
4531 	erp = ifp->if_u1.if_ext_irec;
4532 	erp[erp_idx].er_extbuf = (xfs_bmbt_rec_t *)
4533 		kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP);
4534 	ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
4535 	memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ);
4536 	erp[erp_idx].er_extcount = 0;
4537 	erp[erp_idx].er_extoff = erp_idx > 0 ?
4538 		erp[erp_idx-1].er_extoff + erp[erp_idx-1].er_extcount : 0;
4539 	return &erp[erp_idx];
4540 }
4541 
4542 /*
4543  * Remove a record from the indirection array.
4544  */
4545 void
4546 xfs_iext_irec_remove(
4547 	xfs_ifork_t	*ifp,		/* inode fork pointer */
4548 	int		erp_idx)	/* irec index to remove */
4549 {
4550 	xfs_ext_irec_t	*erp;		/* indirection array pointer */
4551 	int		i;		/* loop counter */
4552 	int		nlists;		/* number of irec's (ex lists) */
4553 
4554 	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4555 	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4556 	erp = &ifp->if_u1.if_ext_irec[erp_idx];
4557 	if (erp->er_extbuf) {
4558 		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1,
4559 			-erp->er_extcount);
4560 		kmem_free(erp->er_extbuf, XFS_IEXT_BUFSZ);
4561 	}
4562 	/* Compact extent records */
4563 	erp = ifp->if_u1.if_ext_irec;
4564 	for (i = erp_idx; i < nlists - 1; i++) {
4565 		memmove(&erp[i], &erp[i+1], sizeof(xfs_ext_irec_t));
4566 	}
4567 	/*
4568 	 * Manually free the last extent record from the indirection
4569 	 * array.  A call to xfs_iext_realloc_indirect() with a size
4570 	 * of zero would result in a call to xfs_iext_destroy() which
4571 	 * would in turn call this function again, creating a nasty
4572 	 * infinite loop.
4573 	 */
4574 	if (--nlists) {
4575 		xfs_iext_realloc_indirect(ifp,
4576 			nlists * sizeof(xfs_ext_irec_t));
4577 	} else {
4578 		kmem_free(ifp->if_u1.if_ext_irec,
4579 			sizeof(xfs_ext_irec_t));
4580 	}
4581 	ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
4582 }
4583 
4584 /*
4585  * This is called to clean up large amounts of unused memory allocated
4586  * by the indirection array.  Before compacting anything though, verify
4587  * that the indirection array is still needed and switch back to the
4588  * linear extent list (or even the inline buffer) if possible.  The
4589  * compaction policy is as follows:
4590  *
4591  *    Full Compaction: Extents fit into a single page (or inline buffer)
4592  *    Full Compaction: Extents occupy less than 1/8 of allocated space
4593  * Partial Compaction: Extents occupy >= 1/8 and < 1/2 of allocated space
4594  *      No Compaction: Extents occupy at least 1/2 of allocated space
4595  */
4596 void
4597 xfs_iext_irec_compact(
4598 	xfs_ifork_t	*ifp)		/* inode fork pointer */
4599 {
4600 	xfs_extnum_t	nextents;	/* number of extents in file */
4601 	int		nlists;		/* number of irec's (ex lists) */
4602 
4603 	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4604 	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4605 	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4606 
4607 	if (nextents == 0) {
4608 		xfs_iext_destroy(ifp);
4609 	} else if (nextents <= XFS_INLINE_EXTS) {
4610 		xfs_iext_indirect_to_direct(ifp);
4611 		xfs_iext_direct_to_inline(ifp, nextents);
4612 	} else if (nextents <= XFS_LINEAR_EXTS) {
4613 		xfs_iext_indirect_to_direct(ifp);
4614 	} else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 3) {
4615 		xfs_iext_irec_compact_full(ifp);
4616 	} else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) {
4617 		xfs_iext_irec_compact_pages(ifp);
4618 	}
4619 }
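
/*
 * Illustrative only: the policy above is plain integer arithmetic on
 * nlists * XFS_LINEAR_EXTS.  A standalone sketch of the decision
 * (guarded out of the build; the DEMO_* constants and
 * demo_compact_policy() are hypothetical, not part of xfs_inode.c):
 */
#if 0	/* standalone sketch, not built with xfs_inode.c */
#include <stdio.h>

#define DEMO_INLINE_EXTS	2	/* stand-in for XFS_INLINE_EXTS */
#define DEMO_LINEAR_EXTS	256	/* stand-in for XFS_LINEAR_EXTS */

/* Return a label describing what xfs_iext_irec_compact() would do. */
static const char *demo_compact_policy(int nextents, int nlists)
{
	if (nextents == 0)
		return "destroy";
	if (nextents <= DEMO_INLINE_EXTS)
		return "indirect -> direct -> inline";
	if (nextents <= DEMO_LINEAR_EXTS)
		return "indirect -> direct";
	if (nextents < (nlists * DEMO_LINEAR_EXTS) >> 3)
		return "full compaction (< 1/8 used)";
	if (nextents < (nlists * DEMO_LINEAR_EXTS) >> 1)
		return "page compaction (< 1/2 used)";
	return "no compaction";
}

int main(void)
{
	printf("%s\n", demo_compact_policy(300, 16));	/* full */
	printf("%s\n", demo_compact_policy(1500, 16));	/* page */
	printf("%s\n", demo_compact_policy(3000, 16));	/* none */
	return 0;
}
#endif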
4620 
4621 /*
4622  * Combine extents from neighboring extent pages.
4623  */
4624 void
4625 xfs_iext_irec_compact_pages(
4626 	xfs_ifork_t	*ifp)		/* inode fork pointer */
4627 {
4628 	xfs_ext_irec_t	*erp, *erp_next;/* pointers to irec entries */
4629 	int		erp_idx = 0;	/* indirection array index */
4630 	int		nlists;		/* number of irec's (ex lists) */
4631 
4632 	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4633 	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4634 	while (erp_idx < nlists - 1) {
4635 		erp = &ifp->if_u1.if_ext_irec[erp_idx];
4636 		erp_next = erp + 1;
4637 		if (erp_next->er_extcount <=
4638 		    (XFS_LINEAR_EXTS - erp->er_extcount)) {
4639 			memmove(&erp->er_extbuf[erp->er_extcount],
4640 				erp_next->er_extbuf, erp_next->er_extcount *
4641 				sizeof(xfs_bmbt_rec_t));
4642 			erp->er_extcount += erp_next->er_extcount;
4643 			/*
4644 			 * Free page before removing extent record
4645 			 * so er_extoffs don't get modified in
4646 			 * xfs_iext_irec_remove.
4647 			 */
4648 			kmem_free(erp_next->er_extbuf, XFS_IEXT_BUFSZ);
4649 			erp_next->er_extbuf = NULL;
4650 			xfs_iext_irec_remove(ifp, erp_idx + 1);
4651 			nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4652 		} else {
4653 			erp_idx++;
4654 		}
4655 	}
4656 }
4657 
4658 /*
4659  * Fully compact the extent records managed by the indirection array.
4660  */
4661 void
4662 xfs_iext_irec_compact_full(
4663 	xfs_ifork_t	*ifp)			/* inode fork pointer */
4664 {
4665 	xfs_bmbt_rec_t	*ep, *ep_next;		/* extent record pointers */
4666 	xfs_ext_irec_t	*erp, *erp_next;	/* extent irec pointers */
4667 	int		erp_idx = 0;		/* extent irec index */
4668 	int		ext_avail;		/* empty entries in ex list */
4669 	int		ext_diff;		/* number of exts to add */
4670 	int		nlists;			/* number of irec's (ex lists) */
4671 
4672 	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4673 	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4674 	erp = ifp->if_u1.if_ext_irec;
4675 	ep = &erp->er_extbuf[erp->er_extcount];
4676 	erp_next = erp + 1;
4677 	ep_next = erp_next->er_extbuf;
4678 	while (erp_idx < nlists - 1) {
4679 		ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
4680 		ext_diff = MIN(ext_avail, erp_next->er_extcount);
4681 		memcpy(ep, ep_next, ext_diff * sizeof(xfs_bmbt_rec_t));
4682 		erp->er_extcount += ext_diff;
4683 		erp_next->er_extcount -= ext_diff;
4684 		/* Remove next page */
4685 		if (erp_next->er_extcount == 0) {
4686 			/*
4687 			 * Free page before removing extent record
4688 			 * so er_extoffs don't get modified in
4689 			 * xfs_iext_irec_remove.
4690 			 */
4691 			kmem_free(erp_next->er_extbuf, XFS_IEXT_BUFSZ);
4693 			erp_next->er_extbuf = NULL;
4694 			xfs_iext_irec_remove(ifp, erp_idx + 1);
4695 			erp = &ifp->if_u1.if_ext_irec[erp_idx];
4696 			nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4697 		/* Update next page */
4698 		} else {
4699 			/* Move rest of page up to become next new page */
4700 			memmove(erp_next->er_extbuf, ep_next,
4701 				erp_next->er_extcount * sizeof(xfs_bmbt_rec_t));
4702 			ep_next = erp_next->er_extbuf;
4703 			memset(&ep_next[erp_next->er_extcount], 0,
4704 				(XFS_LINEAR_EXTS - erp_next->er_extcount) *
4705 				sizeof(xfs_bmbt_rec_t));
4706 		}
4707 		if (erp->er_extcount == XFS_LINEAR_EXTS) {
4708 			erp_idx++;
4709 			if (erp_idx < nlists)
4710 				erp = &ifp->if_u1.if_ext_irec[erp_idx];
4711 			else
4712 				break;
4713 		}
4714 		ep = &erp->er_extbuf[erp->er_extcount];
4715 		erp_next = erp + 1;
4716 		ep_next = erp_next->er_extbuf;
4717 	}
4718 }
4719 
4720 /*
4721  * This is called to update the er_extoff field in the indirection
4722  * array when extents have been added or removed from one of the
4723  * extent lists. erp_idx contains the irec index to begin updating
4724  * at and ext_diff contains the number of extents that were added
4725  * or removed.
4726  */
4727 void
4728 xfs_iext_irec_update_extoffs(
4729 	xfs_ifork_t	*ifp,		/* inode fork pointer */
4730 	int		erp_idx,	/* irec index to update */
4731 	int		ext_diff)	/* number of new extents */
4732 {
4733 	int		i;		/* loop counter */
4734 	int		nlists;		/* number of irec's (ex lists) */
4735 
4736 	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4737 	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4738 	for (i = erp_idx; i < nlists; i++) {
4739 		ifp->if_u1.if_ext_irec[i].er_extoff += ext_diff;
4740 	}
4741 }
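
/*
 * Illustrative only: er_extoff is effectively a running prefix sum of
 * er_extcount over the preceding pages, so adding or removing extents
 * in one page shifts every later offset by the same amount.  A
 * standalone sketch on a plain int array (guarded out of the build;
 * demo_update_extoffs() is hypothetical, not part of xfs_inode.c):
 */
#if 0	/* standalone sketch, not built with xfs_inode.c */
#include <stdio.h>

/* Shift all offsets from 'first' onwards by 'ext_diff'. */
static void demo_update_extoffs(int *extoff, int nlists,
				int first, int ext_diff)
{
	int i;

	for (i = first; i < nlists; i++)
		extoff[i] += ext_diff;
}

int main(void)
{
	int extoff[4] = { 0, 256, 512, 768 };
	int i;

	demo_update_extoffs(extoff, 4, 2, -10);	/* page 1 lost 10 extents */
	for (i = 0; i < 4; i++)
		printf("%d ", extoff[i]);	/* 0 256 502 758 */
	printf("\n");
	return 0;
}
#endif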
4742