xref: /linux/fs/xfs/libxfs/xfs_inode_buf.c (revision 856e7c4b619af622d56b3b454f7bec32a170ac99)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4  * All Rights Reserved.
5  */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_mount.h"
13 #include "xfs_defer.h"
14 #include "xfs_inode.h"
15 #include "xfs_errortag.h"
16 #include "xfs_error.h"
17 #include "xfs_cksum.h"
18 #include "xfs_icache.h"
19 #include "xfs_trans.h"
20 #include "xfs_ialloc.h"
21 #include "xfs_dir2.h"
22 
23 #include <linux/iversion.h>
24 
25 /*
26  * Check that none of the inodes in the buffer have a next
27  * unlinked field of 0.
28  */
29 #if defined(DEBUG)
30 void
31 xfs_inobp_check(
32 	xfs_mount_t	*mp,
33 	xfs_buf_t	*bp)
34 {
35 	int		i;
36 	int		j;
37 	xfs_dinode_t	*dip;
38 
39 	j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
40 
41 	for (i = 0; i < j; i++) {
42 		dip = xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize);
43 		if (!dip->di_next_unlinked)  {
44 			xfs_alert(mp,
45 	"Detected bogus zero next_unlinked field in inode %d buffer 0x%llx.",
46 				i, (long long)bp->b_bn);
47 		}
48 	}
49 }
50 #endif
51 
52 bool
53 xfs_dinode_good_version(
54 	struct xfs_mount *mp,
55 	__u8		version)
56 {
57 	if (xfs_sb_version_hascrc(&mp->m_sb))
58 		return version == 3;
59 
60 	return version == 1 || version == 2;
61 }
62 
63 /*
64  * If we are doing readahead on an inode buffer, we might be in log recovery
65  * reading an inode allocation buffer that hasn't yet been replayed, and hence
66  * has not had the inode cores stamped into it. So for readahead, the buffer
67  * may be invalid.
68  *
69  * If the readahead buffer is invalid, we need to mark it with an error and
70  * clear the DONE status of the buffer so that a followup read will re-read it
71  * from disk. We don't report the error otherwise to avoid warnings during log
72  * recovery, and we don't get unnecessary panics on debug kernels. We use EIO here
73  * because all we want to do is say readahead failed; there is no-one to report
74  * the error to, so this will distinguish it from a non-ra verifier failure.
75  * Changes to this readahead error behaviour also need to be reflected in
76  * xfs_dquot_buf_readahead_verify().
77  */
78 static void
79 xfs_inode_buf_verify(
80 	struct xfs_buf	*bp,
81 	bool		readahead)
82 {
83 	struct xfs_mount *mp = bp->b_target->bt_mount;
84 	xfs_agnumber_t	agno;
85 	int		i;
86 	int		ni;
87 
88 	/*
89 	 * Validate the magic number and version of every inode in the buffer
90 	 */
91 	agno = xfs_daddr_to_agno(mp, XFS_BUF_ADDR(bp));
92 	ni = XFS_BB_TO_FSB(mp, bp->b_length) * mp->m_sb.sb_inopblock;
93 	for (i = 0; i < ni; i++) {
94 		int		di_ok;
95 		xfs_dinode_t	*dip;
96 		xfs_agino_t	unlinked_ino;
97 
98 		dip = xfs_buf_offset(bp, (i << mp->m_sb.sb_inodelog));
99 		unlinked_ino = be32_to_cpu(dip->di_next_unlinked);
100 		di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
101 			xfs_dinode_good_version(mp, dip->di_version) &&
102 			(unlinked_ino == NULLAGINO ||
103 			 xfs_verify_agino(mp, agno, unlinked_ino));
104 		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
105 						XFS_ERRTAG_ITOBP_INOTOBP))) {
106 			if (readahead) {
107 				bp->b_flags &= ~XBF_DONE;
108 				xfs_buf_ioerror(bp, -EIO);
109 				return;
110 			}
111 
112 #ifdef DEBUG
113 			xfs_alert(mp,
114 				"bad inode magic/vsn daddr %lld #%d (magic=%x)",
115 				(unsigned long long)bp->b_bn, i,
116 				be16_to_cpu(dip->di_magic));
117 #endif
118 			xfs_buf_verifier_error(bp, -EFSCORRUPTED,
119 					__func__, dip, sizeof(*dip),
120 					NULL);
121 			return;
122 		}
123 	}
124 }
125 
126 
127 static void
128 xfs_inode_buf_read_verify(
129 	struct xfs_buf	*bp)
130 {
131 	xfs_inode_buf_verify(bp, false);
132 }
133 
134 static void
135 xfs_inode_buf_readahead_verify(
136 	struct xfs_buf	*bp)
137 {
138 	xfs_inode_buf_verify(bp, true);
139 }
140 
141 static void
142 xfs_inode_buf_write_verify(
143 	struct xfs_buf	*bp)
144 {
145 	xfs_inode_buf_verify(bp, false);
146 }
147 
148 const struct xfs_buf_ops xfs_inode_buf_ops = {
149 	.name = "xfs_inode",
150 	.verify_read = xfs_inode_buf_read_verify,
151 	.verify_write = xfs_inode_buf_write_verify,
152 };
153 
154 const struct xfs_buf_ops xfs_inode_buf_ra_ops = {
155 	.name = "xfs_inode_ra",
156 	.verify_read = xfs_inode_buf_readahead_verify,
157 	.verify_write = xfs_inode_buf_write_verify,
158 };
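
/*
 * Illustrative sketch (not compiled): how a readahead caller might use the
 * _ra ops above, modelled on the inode cluster readahead done for bulkstat.
 * The agno/agbno/blks_per_cluster locals are assumed for the example.
 */
#if 0
	xfs_daddr_t	d = XFS_AGB_TO_DADDR(mp, agno, agbno);

	/*
	 * Completion of this readahead runs xfs_inode_buf_readahead_verify(),
	 * so an unreplayed cluster only clears XBF_DONE and marks the buffer
	 * -EIO; the later blocking read with xfs_inode_buf_ops re-reads and
	 * verifies it properly.
	 */
	xfs_buf_readahead(mp->m_ddev_targp, d,
			  mp->m_bsize * blks_per_cluster,
			  &xfs_inode_buf_ra_ops);
#endif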
159 
160 
161 /*
162  * This routine is called to map an inode to the buffer containing the on-disk
163  * version of the inode.  It returns a pointer to the buffer containing the
164  * on-disk inode in the bpp parameter, and in the dipp parameter it returns a
165  * pointer to the on-disk inode within that buffer.
166  *
167  * If a non-zero error is returned, then the contents of bpp and dipp are
168  * undefined.
169  */
170 int
171 xfs_imap_to_bp(
172 	struct xfs_mount	*mp,
173 	struct xfs_trans	*tp,
174 	struct xfs_imap		*imap,
175 	struct xfs_dinode       **dipp,
176 	struct xfs_buf		**bpp,
177 	uint			buf_flags,
178 	uint			iget_flags)
179 {
180 	struct xfs_buf		*bp;
181 	int			error;
182 
183 	buf_flags |= XBF_UNMAPPED;
184 	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
185 				   (int)imap->im_len, buf_flags, &bp,
186 				   &xfs_inode_buf_ops);
187 	if (error) {
188 		if (error == -EAGAIN) {
189 			ASSERT(buf_flags & XBF_TRYLOCK);
190 			return error;
191 		}
192 		xfs_warn(mp, "%s: xfs_trans_read_buf() returned error %d.",
193 			__func__, error);
194 		return error;
195 	}
196 
197 	*bpp = bp;
198 	*dipp = xfs_buf_offset(bp, imap->im_boffset);
199 	return 0;
200 }
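
/*
 * Illustrative sketch (not compiled): a minimal caller of xfs_imap_to_bp(),
 * assuming "ino" holds a valid inode number and "tp" may be NULL.  xfs_iread()
 * below is the real in-tree user of this pattern.
 */
#if 0
	struct xfs_imap		imap;
	struct xfs_dinode	*dip;
	struct xfs_buf		*bp;
	int			error;

	error = xfs_imap(mp, tp, ino, &imap, 0);	/* locate the cluster */
	if (error)
		return error;
	error = xfs_imap_to_bp(mp, tp, &imap, &dip, &bp, 0, 0);
	if (error)
		return error;
	/* ... use *dip ... */
	xfs_trans_brelse(tp, bp);	/* plain brelse if tp == NULL */
#endif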
201 
202 void
203 xfs_inode_from_disk(
204 	struct xfs_inode	*ip,
205 	struct xfs_dinode	*from)
206 {
207 	struct xfs_icdinode	*to = &ip->i_d;
208 	struct inode		*inode = VFS_I(ip);
209 
210 
211 	/*
212 	 * Convert v1 inodes immediately to v2 inode format as this is the
213 	 * minimum inode version format we support in the rest of the code.
214 	 */
215 	to->di_version = from->di_version;
216 	if (to->di_version == 1) {
217 		set_nlink(inode, be16_to_cpu(from->di_onlink));
218 		to->di_projid_lo = 0;
219 		to->di_projid_hi = 0;
220 		to->di_version = 2;
221 	} else {
222 		set_nlink(inode, be32_to_cpu(from->di_nlink));
223 		to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
224 		to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
225 	}
226 
227 	to->di_format = from->di_format;
228 	to->di_uid = be32_to_cpu(from->di_uid);
229 	to->di_gid = be32_to_cpu(from->di_gid);
230 	to->di_flushiter = be16_to_cpu(from->di_flushiter);
231 
232 	/*
233 	 * Time is signed, so we need to convert to signed 32 bit before
234 	 * storing it in the inode timestamp, which may be 64 bit. Otherwise
235 	 * a time before epoch is converted to a time long after epoch
236 	 * on 64 bit systems.
237 	 */
238 	inode->i_atime.tv_sec = (int)be32_to_cpu(from->di_atime.t_sec);
239 	inode->i_atime.tv_nsec = (int)be32_to_cpu(from->di_atime.t_nsec);
240 	inode->i_mtime.tv_sec = (int)be32_to_cpu(from->di_mtime.t_sec);
241 	inode->i_mtime.tv_nsec = (int)be32_to_cpu(from->di_mtime.t_nsec);
242 	inode->i_ctime.tv_sec = (int)be32_to_cpu(from->di_ctime.t_sec);
243 	inode->i_ctime.tv_nsec = (int)be32_to_cpu(from->di_ctime.t_nsec);
244 	inode->i_generation = be32_to_cpu(from->di_gen);
245 	inode->i_mode = be16_to_cpu(from->di_mode);
246 
247 	to->di_size = be64_to_cpu(from->di_size);
248 	to->di_nblocks = be64_to_cpu(from->di_nblocks);
249 	to->di_extsize = be32_to_cpu(from->di_extsize);
250 	to->di_nextents = be32_to_cpu(from->di_nextents);
251 	to->di_anextents = be16_to_cpu(from->di_anextents);
252 	to->di_forkoff = from->di_forkoff;
253 	to->di_aformat	= from->di_aformat;
254 	to->di_dmevmask	= be32_to_cpu(from->di_dmevmask);
255 	to->di_dmstate	= be16_to_cpu(from->di_dmstate);
256 	to->di_flags	= be16_to_cpu(from->di_flags);
257 
258 	if (to->di_version == 3) {
259 		inode_set_iversion_queried(inode,
260 					   be64_to_cpu(from->di_changecount));
261 		to->di_crtime.t_sec = be32_to_cpu(from->di_crtime.t_sec);
262 		to->di_crtime.t_nsec = be32_to_cpu(from->di_crtime.t_nsec);
263 		to->di_flags2 = be64_to_cpu(from->di_flags2);
264 		to->di_cowextsize = be32_to_cpu(from->di_cowextsize);
265 	}
266 }
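
/*
 * Worked example (illustrative only) for the signed time conversion in
 * xfs_inode_from_disk() above: a pre-epoch timestamp of -1 is stored on disk
 * as the 32-bit pattern 0xffffffff.  be32_to_cpu() returns an unsigned value,
 * so assigning it straight into a 64-bit tv_sec would zero-extend it to
 * 4294967295 (a date in 2106); casting to int first sign-extends it to -1.
 */
#if 0
	__be32		on_disk = cpu_to_be32(-1);
	int64_t		no_cast = be32_to_cpu(on_disk);		/* 4294967295 */
	int64_t		cast    = (int)be32_to_cpu(on_disk);	/* -1 */
#endif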
267 
268 void
269 xfs_inode_to_disk(
270 	struct xfs_inode	*ip,
271 	struct xfs_dinode	*to,
272 	xfs_lsn_t		lsn)
273 {
274 	struct xfs_icdinode	*from = &ip->i_d;
275 	struct inode		*inode = VFS_I(ip);
276 
277 	to->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
278 	to->di_onlink = 0;
279 
280 	to->di_version = from->di_version;
281 	to->di_format = from->di_format;
282 	to->di_uid = cpu_to_be32(from->di_uid);
283 	to->di_gid = cpu_to_be32(from->di_gid);
284 	to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
285 	to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
286 
287 	memset(to->di_pad, 0, sizeof(to->di_pad));
288 	to->di_atime.t_sec = cpu_to_be32(inode->i_atime.tv_sec);
289 	to->di_atime.t_nsec = cpu_to_be32(inode->i_atime.tv_nsec);
290 	to->di_mtime.t_sec = cpu_to_be32(inode->i_mtime.tv_sec);
291 	to->di_mtime.t_nsec = cpu_to_be32(inode->i_mtime.tv_nsec);
292 	to->di_ctime.t_sec = cpu_to_be32(inode->i_ctime.tv_sec);
293 	to->di_ctime.t_nsec = cpu_to_be32(inode->i_ctime.tv_nsec);
294 	to->di_nlink = cpu_to_be32(inode->i_nlink);
295 	to->di_gen = cpu_to_be32(inode->i_generation);
296 	to->di_mode = cpu_to_be16(inode->i_mode);
297 
298 	to->di_size = cpu_to_be64(from->di_size);
299 	to->di_nblocks = cpu_to_be64(from->di_nblocks);
300 	to->di_extsize = cpu_to_be32(from->di_extsize);
301 	to->di_nextents = cpu_to_be32(from->di_nextents);
302 	to->di_anextents = cpu_to_be16(from->di_anextents);
303 	to->di_forkoff = from->di_forkoff;
304 	to->di_aformat = from->di_aformat;
305 	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
306 	to->di_dmstate = cpu_to_be16(from->di_dmstate);
307 	to->di_flags = cpu_to_be16(from->di_flags);
308 
309 	if (from->di_version == 3) {
310 		to->di_changecount = cpu_to_be64(inode_peek_iversion(inode));
311 		to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
312 		to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
313 		to->di_flags2 = cpu_to_be64(from->di_flags2);
314 		to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
315 		to->di_ino = cpu_to_be64(ip->i_ino);
316 		to->di_lsn = cpu_to_be64(lsn);
317 		memset(to->di_pad2, 0, sizeof(to->di_pad2));
318 		uuid_copy(&to->di_uuid, &ip->i_mount->m_sb.sb_meta_uuid);
319 		to->di_flushiter = 0;
320 	} else {
321 		to->di_flushiter = cpu_to_be16(from->di_flushiter);
322 	}
323 }
324 
325 void
326 xfs_log_dinode_to_disk(
327 	struct xfs_log_dinode	*from,
328 	struct xfs_dinode	*to)
329 {
330 	to->di_magic = cpu_to_be16(from->di_magic);
331 	to->di_mode = cpu_to_be16(from->di_mode);
332 	to->di_version = from->di_version;
333 	to->di_format = from->di_format;
334 	to->di_onlink = 0;
335 	to->di_uid = cpu_to_be32(from->di_uid);
336 	to->di_gid = cpu_to_be32(from->di_gid);
337 	to->di_nlink = cpu_to_be32(from->di_nlink);
338 	to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
339 	to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
340 	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
341 
342 	to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
343 	to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
344 	to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
345 	to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
346 	to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
347 	to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
348 
349 	to->di_size = cpu_to_be64(from->di_size);
350 	to->di_nblocks = cpu_to_be64(from->di_nblocks);
351 	to->di_extsize = cpu_to_be32(from->di_extsize);
352 	to->di_nextents = cpu_to_be32(from->di_nextents);
353 	to->di_anextents = cpu_to_be16(from->di_anextents);
354 	to->di_forkoff = from->di_forkoff;
355 	to->di_aformat = from->di_aformat;
356 	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
357 	to->di_dmstate = cpu_to_be16(from->di_dmstate);
358 	to->di_flags = cpu_to_be16(from->di_flags);
359 	to->di_gen = cpu_to_be32(from->di_gen);
360 
361 	if (from->di_version == 3) {
362 		to->di_changecount = cpu_to_be64(from->di_changecount);
363 		to->di_crtime.t_sec = cpu_to_be32(from->di_crtime.t_sec);
364 		to->di_crtime.t_nsec = cpu_to_be32(from->di_crtime.t_nsec);
365 		to->di_flags2 = cpu_to_be64(from->di_flags2);
366 		to->di_cowextsize = cpu_to_be32(from->di_cowextsize);
367 		to->di_ino = cpu_to_be64(from->di_ino);
368 		to->di_lsn = cpu_to_be64(from->di_lsn);
369 		memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
370 		uuid_copy(&to->di_uuid, &from->di_uuid);
371 		to->di_flushiter = 0;
372 	} else {
373 		to->di_flushiter = cpu_to_be16(from->di_flushiter);
374 	}
375 }
376 
377 xfs_failaddr_t
378 xfs_dinode_verify(
379 	struct xfs_mount	*mp,
380 	xfs_ino_t		ino,
381 	struct xfs_dinode	*dip)
382 {
383 	xfs_failaddr_t		fa;
384 	uint16_t		mode;
385 	uint16_t		flags;
386 	uint64_t		flags2;
387 	uint64_t		di_size;
388 
389 	if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
390 		return __this_address;
391 
392 	/* Verify v3 integrity information first */
393 	if (dip->di_version >= 3) {
394 		if (!xfs_sb_version_hascrc(&mp->m_sb))
395 			return __this_address;
396 		if (!xfs_verify_cksum((char *)dip, mp->m_sb.sb_inodesize,
397 				      XFS_DINODE_CRC_OFF))
398 			return __this_address;
399 		if (be64_to_cpu(dip->di_ino) != ino)
400 			return __this_address;
401 		if (!uuid_equal(&dip->di_uuid, &mp->m_sb.sb_meta_uuid))
402 			return __this_address;
403 	}
404 
405 	/* don't allow invalid i_size */
406 	di_size = be64_to_cpu(dip->di_size);
407 	if (di_size & (1ULL << 63))
408 		return __this_address;
409 
410 	mode = be16_to_cpu(dip->di_mode);
411 	if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
412 		return __this_address;
413 
414 	/* No zero-length symlinks/dirs. */
415 	if ((S_ISLNK(mode) || S_ISDIR(mode)) && di_size == 0)
416 		return __this_address;
417 
418 	/* Fork checks carried over from xfs_iformat_fork */
419 	if (mode &&
420 	    be32_to_cpu(dip->di_nextents) + be16_to_cpu(dip->di_anextents) >
421 			be64_to_cpu(dip->di_nblocks))
422 		return __this_address;
423 
424 	if (mode && XFS_DFORK_BOFF(dip) > mp->m_sb.sb_inodesize)
425 		return __this_address;
426 
427 	flags = be16_to_cpu(dip->di_flags);
428 
429 	if (mode && (flags & XFS_DIFLAG_REALTIME) && !mp->m_rtdev_targp)
430 		return __this_address;
431 
432 	/* Do we have appropriate data fork formats for the mode? */
433 	switch (mode & S_IFMT) {
434 	case S_IFIFO:
435 	case S_IFCHR:
436 	case S_IFBLK:
437 	case S_IFSOCK:
438 		if (dip->di_format != XFS_DINODE_FMT_DEV)
439 			return __this_address;
440 		break;
441 	case S_IFREG:
442 	case S_IFLNK:
443 	case S_IFDIR:
444 		switch (dip->di_format) {
445 		case XFS_DINODE_FMT_LOCAL:
446 			/*
447 			 * no local regular files yet
448 			 */
449 			if (S_ISREG(mode))
450 				return __this_address;
451 			if (di_size > XFS_DFORK_DSIZE(dip, mp))
452 				return __this_address;
453 			if (dip->di_nextents)
454 				return __this_address;
455 			/* fall through */
456 		case XFS_DINODE_FMT_EXTENTS:
457 		case XFS_DINODE_FMT_BTREE:
458 			break;
459 		default:
460 			return __this_address;
461 		}
462 		break;
463 	case 0:
464 		/* Uninitialized inode ok. */
465 		break;
466 	default:
467 		return __this_address;
468 	}
469 
470 	if (XFS_DFORK_Q(dip)) {
471 		switch (dip->di_aformat) {
472 		case XFS_DINODE_FMT_LOCAL:
473 			if (dip->di_anextents)
474 				return __this_address;
475 		/* fall through */
476 		case XFS_DINODE_FMT_EXTENTS:
477 		case XFS_DINODE_FMT_BTREE:
478 			break;
479 		default:
480 			return __this_address;
481 		}
482 	} else {
483 		/*
484 		 * If there is no fork offset, this may be a freshly-made inode
485 		 * in a new disk cluster, in which case di_aformat is zeroed.
486 		 * Otherwise, such an inode must be in EXTENTS format; this goes
487 		 * for freed inodes as well.
488 		 */
489 		switch (dip->di_aformat) {
490 		case 0:
491 		case XFS_DINODE_FMT_EXTENTS:
492 			break;
493 		default:
494 			return __this_address;
495 		}
496 		if (dip->di_anextents)
497 			return __this_address;
498 	}
499 
500 	/* extent size hint validation */
501 	fa = xfs_inode_validate_extsize(mp, be32_to_cpu(dip->di_extsize),
502 			mode, flags);
503 	if (fa)
504 		return fa;
505 
506 	/* only version 3 or greater inodes are extensively verified here */
507 	if (dip->di_version < 3)
508 		return NULL;
509 
510 	flags2 = be64_to_cpu(dip->di_flags2);
511 
512 	/* don't allow reflink/cowextsize if we don't have reflink */
513 	if ((flags2 & (XFS_DIFLAG2_REFLINK | XFS_DIFLAG2_COWEXTSIZE)) &&
514 	     !xfs_sb_version_hasreflink(&mp->m_sb))
515 		return __this_address;
516 
517 	/* only regular files get reflink */
518 	if ((flags2 & XFS_DIFLAG2_REFLINK) && (mode & S_IFMT) != S_IFREG)
519 		return __this_address;
520 
521 	/* don't let reflink and realtime mix */
522 	if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags & XFS_DIFLAG_REALTIME))
523 		return __this_address;
524 
525 	/* don't let reflink and dax mix */
526 	if ((flags2 & XFS_DIFLAG2_REFLINK) && (flags2 & XFS_DIFLAG2_DAX))
527 		return __this_address;
528 
529 	/* COW extent size hint validation */
530 	fa = xfs_inode_validate_cowextsize(mp, be32_to_cpu(dip->di_cowextsize),
531 			mode, flags, flags2);
532 	if (fa)
533 		return fa;
534 
535 	return NULL;
536 }
537 
538 void
539 xfs_dinode_calc_crc(
540 	struct xfs_mount	*mp,
541 	struct xfs_dinode	*dip)
542 {
543 	uint32_t		crc;
544 
545 	if (dip->di_version < 3)
546 		return;
547 
548 	ASSERT(xfs_sb_version_hascrc(&mp->m_sb));
549 	crc = xfs_start_cksum_update((char *)dip, mp->m_sb.sb_inodesize,
550 			      XFS_DINODE_CRC_OFF);
551 	dip->di_crc = xfs_end_cksum(crc);
552 }
553 
554 /*
555  * Read the disk inode attributes into the in-core inode structure.
556  *
557  * For version 5 superblocks, if we are initialising a new inode and we are not
558  * utilising the XFS_MOUNT_IKEEP inode cluster mode, we can simply build the new
559  * inode core with a random generation number. If we are keeping inodes around,
560  * we need to read the inode cluster to get the existing generation number off
561  * disk. Further, if we are using version 4 superblocks (i.e. v1/v2 inode
562  * format) then log recovery is dependent on the di_flushiter field being
563  * initialised from the current on-disk value and hence we must also read the
564  * inode off disk.
565  */
566 int
567 xfs_iread(
568 	xfs_mount_t	*mp,
569 	xfs_trans_t	*tp,
570 	xfs_inode_t	*ip,
571 	uint		iget_flags)
572 {
573 	xfs_buf_t	*bp;
574 	xfs_dinode_t	*dip;
575 	xfs_failaddr_t	fa;
576 	int		error;
577 
578 	/*
579 	 * Fill in the location information in the in-core inode.
580 	 */
581 	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);
582 	if (error)
583 		return error;
584 
585 	/* shortcut IO on inode allocation if possible */
586 	if ((iget_flags & XFS_IGET_CREATE) &&
587 	    xfs_sb_version_hascrc(&mp->m_sb) &&
588 	    !(mp->m_flags & XFS_MOUNT_IKEEP)) {
589 		/* initialise the on-disk inode core */
590 		memset(&ip->i_d, 0, sizeof(ip->i_d));
591 		VFS_I(ip)->i_generation = prandom_u32();
592 		ip->i_d.di_version = 3;
593 		return 0;
594 	}
595 
596 	/*
597 	 * Get pointers to the on-disk inode and the buffer containing it.
598 	 */
599 	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0, iget_flags);
600 	if (error)
601 		return error;
602 
603 	/* even unallocated inodes are verified */
604 	fa = xfs_dinode_verify(mp, ip->i_ino, dip);
605 	if (fa) {
606 		xfs_inode_verifier_error(ip, -EFSCORRUPTED, "dinode", dip,
607 				sizeof(*dip), fa);
608 		error = -EFSCORRUPTED;
609 		goto out_brelse;
610 	}
611 
612 	/*
613 	 * If the on-disk inode is already linked to a directory
614 	 * entry, copy all of the inode into the in-core inode.
615 	 * xfs_iformat_fork() handles copying in the inode format
616 	 * specific information.
617 	 * Otherwise, just get the truly permanent information.
618 	 */
619 	if (dip->di_mode) {
620 		xfs_inode_from_disk(ip, dip);
621 		error = xfs_iformat_fork(ip, dip);
622 		if (error)  {
623 #ifdef DEBUG
624 			xfs_alert(mp, "%s: xfs_iformat() returned error %d",
625 				__func__, error);
626 #endif /* DEBUG */
627 			goto out_brelse;
628 		}
629 	} else {
630 		/*
631 		 * Partial initialisation of the in-core inode. Just the bits
632 		 * that xfs_ialloc won't overwrite or that it relies on being correct.
633 		 */
634 		ip->i_d.di_version = dip->di_version;
635 		VFS_I(ip)->i_generation = be32_to_cpu(dip->di_gen);
636 		ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);
637 
638 		/*
639 		 * Make sure to pull in the mode here as well in
640 		 * case the inode is released without being used.
641 		 * This ensures that xfs_inactive() will see that
642 		 * the inode is already free and not try to mess
643 		 * with the uninitialized part of it.
644 		 */
645 		VFS_I(ip)->i_mode = 0;
646 	}
647 
648 	ASSERT(ip->i_d.di_version >= 2);
649 	ip->i_delayed_blks = 0;
650 
651 	/*
652 	 * Mark the buffer containing the inode as something to keep
653 	 * around for a while.  This helps to keep recently accessed
654 	 * meta-data in-core longer.
655 	 */
656 	xfs_buf_set_ref(bp, XFS_INO_REF);
657 
658 	/*
659 	 * Use xfs_trans_brelse() to release the buffer containing the on-disk
660 	 * inode, because it was acquired with xfs_trans_read_buf() in
661 	 * xfs_imap_to_bp() above.  If tp is NULL, this is just a normal
662 	 * brelse().  If we're within a transaction, then xfs_trans_brelse()
663 	 * will only release the buffer if it is not dirty within the
664 	 * transaction.  It will be OK to release the buffer in this case,
665 	 * because inodes on disk are never destroyed and we will be locking the
666 	 * new in-core inode before putting it in the cache where other
667 	 * processes can find it.  Thus we don't have to worry about the inode
668 	 * being changed just because we released the buffer.
669 	 */
670  out_brelse:
671 	xfs_trans_brelse(tp, bp);
672 	return error;
673 }
674 
675 /*
676  * Validate di_extsize hint.
677  *
678  * The rules are documented at xfs_ioctl_setattr_check_extsize().
679  * These functions must be kept in sync with each other.
680  */
681 xfs_failaddr_t
682 xfs_inode_validate_extsize(
683 	struct xfs_mount		*mp,
684 	uint32_t			extsize,
685 	uint16_t			mode,
686 	uint16_t			flags)
687 {
688 	bool				rt_flag;
689 	bool				hint_flag;
690 	bool				inherit_flag;
691 	uint32_t			extsize_bytes;
692 	uint32_t			blocksize_bytes;
693 
694 	rt_flag = (flags & XFS_DIFLAG_REALTIME);
695 	hint_flag = (flags & XFS_DIFLAG_EXTSIZE);
696 	inherit_flag = (flags & XFS_DIFLAG_EXTSZINHERIT);
697 	extsize_bytes = XFS_FSB_TO_B(mp, extsize);
698 
699 	if (rt_flag)
700 		blocksize_bytes = mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog;
701 	else
702 		blocksize_bytes = mp->m_sb.sb_blocksize;
703 
704 	if ((hint_flag || inherit_flag) && !(S_ISDIR(mode) || S_ISREG(mode)))
705 		return __this_address;
706 
707 	if (hint_flag && !S_ISREG(mode))
708 		return __this_address;
709 
710 	if (inherit_flag && !S_ISDIR(mode))
711 		return __this_address;
712 
713 	if ((hint_flag || inherit_flag) && extsize == 0)
714 		return __this_address;
715 
716 	if (!(hint_flag || inherit_flag) && extsize != 0)
717 		return __this_address;
718 
719 	if (extsize_bytes % blocksize_bytes)
720 		return __this_address;
721 
722 	if (extsize > MAXEXTLEN)
723 		return __this_address;
724 
725 	if (!rt_flag && extsize > mp->m_sb.sb_agblocks / 2)
726 		return __this_address;
727 
728 	return NULL;
729 }
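
/*
 * Illustrative sketch (not compiled) exercising the extent size hint rules
 * above; the hint value of 16 blocks is assumed for the example.
 */
#if 0
	/* regular file on the data device with XFS_DIFLAG_EXTSIZE: accepted */
	ASSERT(xfs_inode_validate_extsize(mp, 16, S_IFREG,
					  XFS_DIFLAG_EXTSIZE) == NULL);

	/* non-zero hint without either extsize flag: rejected */
	ASSERT(xfs_inode_validate_extsize(mp, 16, S_IFREG, 0) != NULL);

	/* XFS_DIFLAG_EXTSZINHERIT only makes sense on directories: rejected */
	ASSERT(xfs_inode_validate_extsize(mp, 16, S_IFREG,
					  XFS_DIFLAG_EXTSZINHERIT) != NULL);
#endif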
730 
731 /*
732  * Validate di_cowextsize hint.
733  *
734  * The rules are documented at xfs_ioctl_setattr_check_cowextsize().
735  * These functions must be kept in sync with each other.
736  */
737 xfs_failaddr_t
738 xfs_inode_validate_cowextsize(
739 	struct xfs_mount		*mp,
740 	uint32_t			cowextsize,
741 	uint16_t			mode,
742 	uint16_t			flags,
743 	uint64_t			flags2)
744 {
745 	bool				rt_flag;
746 	bool				hint_flag;
747 	uint32_t			cowextsize_bytes;
748 
749 	rt_flag = (flags & XFS_DIFLAG_REALTIME);
750 	hint_flag = (flags2 & XFS_DIFLAG2_COWEXTSIZE);
751 	cowextsize_bytes = XFS_FSB_TO_B(mp, cowextsize);
752 
753 	if (hint_flag && !xfs_sb_version_hasreflink(&mp->m_sb))
754 		return __this_address;
755 
756 	if (hint_flag && !(S_ISDIR(mode) || S_ISREG(mode)))
757 		return __this_address;
758 
759 	if (hint_flag && cowextsize == 0)
760 		return __this_address;
761 
762 	if (!hint_flag && cowextsize != 0)
763 		return __this_address;
764 
765 	if (hint_flag && rt_flag)
766 		return __this_address;
767 
768 	if (cowextsize_bytes % mp->m_sb.sb_blocksize)
769 		return __this_address;
770 
771 	if (cowextsize > MAXEXTLEN)
772 		return __this_address;
773 
774 	if (cowextsize > mp->m_sb.sb_agblocks / 2)
775 		return __this_address;
776 
777 	return NULL;
778 }
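
/*
 * Illustrative sketch (not compiled) of the CoW extent size hint rules above,
 * again with an assumed hint of 16 blocks on a reflink-enabled filesystem.
 */
#if 0
	/* hint flag set on a regular, non-realtime file: accepted */
	ASSERT(xfs_inode_validate_cowextsize(mp, 16, S_IFREG, 0,
					     XFS_DIFLAG2_COWEXTSIZE) == NULL);

	/* CoW extent size hints and realtime files don't mix: rejected */
	ASSERT(xfs_inode_validate_cowextsize(mp, 16, S_IFREG,
					     XFS_DIFLAG_REALTIME,
					     XFS_DIFLAG2_COWEXTSIZE) != NULL);
#endif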
779