xref: /linux/fs/xfs/xfs_bmap_util.c (revision c148bc7535650fbfa95a1f571b9ffa2ab478ea33)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4  * Copyright (c) 2012 Red Hat, Inc.
5  * All Rights Reserved.
6  */
7 #include "xfs.h"
8 #include "xfs_fs.h"
9 #include "xfs_shared.h"
10 #include "xfs_format.h"
11 #include "xfs_log_format.h"
12 #include "xfs_trans_resv.h"
13 #include "xfs_bit.h"
14 #include "xfs_mount.h"
15 #include "xfs_defer.h"
16 #include "xfs_inode.h"
17 #include "xfs_btree.h"
18 #include "xfs_trans.h"
19 #include "xfs_alloc.h"
20 #include "xfs_bmap.h"
21 #include "xfs_bmap_util.h"
22 #include "xfs_bmap_btree.h"
23 #include "xfs_rtalloc.h"
24 #include "xfs_error.h"
25 #include "xfs_quota.h"
26 #include "xfs_trans_space.h"
27 #include "xfs_trace.h"
28 #include "xfs_icache.h"
29 #include "xfs_iomap.h"
30 #include "xfs_reflink.h"
31 #include "xfs_rtbitmap.h"
32 #include "xfs_rtgroup.h"
33 #include "xfs_zone_alloc.h"
34 
35 /* Kernel only BMAP related definitions and functions */
36 
37 /*
38  * Convert the given file system block to a disk block.  We have to treat it
39  * differently based on whether the file is a real time file or not, because the
40  * bmap code does.
41  */
42 xfs_daddr_t
43 xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
44 {
45 	if (XFS_IS_REALTIME_INODE(ip))
46 		return xfs_rtb_to_daddr(ip->i_mount, fsb);
47 	return XFS_FSB_TO_DADDR(ip->i_mount, fsb);
48 }
49 
50 /*
51  * Routine to zero an extent on disk allocated to the specific inode.
52  */
53 int
54 xfs_zero_extent(
55 	struct xfs_inode	*ip,
56 	xfs_fsblock_t		start_fsb,
57 	xfs_off_t		count_fsb)
58 {
59 	return blkdev_issue_zeroout(xfs_inode_buftarg(ip)->bt_bdev,
60 			xfs_fsb_to_db(ip, start_fsb),
61 			XFS_FSB_TO_BB(ip->i_mount, count_fsb),
62 			GFP_KERNEL, 0);
63 }
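
Both helpers above are unit-conversion glue: file system blocks (FSBs) are turned into 512-byte "basic blocks" and disk addresses before the request is handed to the block layer. Below is a minimal standalone model of the shift-based FSB-to-BB conversion; the real XFS_FSB_TO_BB() macro takes its geometry from struct xfs_mount, and the 4096-byte block size here is only an assumption for illustration.

#include <assert.h>
#include <stdint.h>

#define BBSHIFT	9			/* basic blocks are always 512 bytes */

/* Model of XFS_FSB_TO_BB(): fs block count -> 512-byte basic blocks. */
static uint64_t fsb_to_bb(uint64_t fsbs, unsigned int blocklog)
{
	return fsbs << (blocklog - BBSHIFT);
}

int main(void)
{
	unsigned int blocklog = 12;	/* assumed 4096-byte fs blocks */

	/* Zeroing a 16-block extent issues a 128-sector zeroout. */
	assert(fsb_to_bb(16, blocklog) == 128);
	return 0;
}
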
64 
65 /*
66  * Extent tree block counting routines.
67  */
68 
69 /*
70  * Count leaf blocks given a range of extent records.  Delayed allocation
71  * extents are not counted towards the totals.
72  */
73 xfs_extnum_t
74 xfs_bmap_count_leaves(
75 	struct xfs_ifork	*ifp,
76 	xfs_filblks_t		*count)
77 {
78 	struct xfs_iext_cursor	icur;
79 	struct xfs_bmbt_irec	got;
80 	xfs_extnum_t		numrecs = 0;
81 
82 	for_each_xfs_iext(ifp, &icur, &got) {
83 		if (!isnullstartblock(got.br_startblock)) {
84 			*count += got.br_blockcount;
85 			numrecs++;
86 		}
87 	}
88 
89 	return numrecs;
90 }
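
The counting rule above is simple but easy to get backwards: delalloc records, which have no real start block yet, contribute to neither the block total nor the record count. A standalone sketch of the same rule over a plain array, with a boolean standing in for the isnullstartblock() test on struct xfs_bmbt_irec:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct irec {
	uint64_t	blockcount;
	bool		delalloc;	/* stands in for isnullstartblock() */
};

static unsigned int count_leaves(const struct irec *recs, unsigned int n,
		uint64_t *count)
{
	unsigned int	i, numrecs = 0;

	for (i = 0; i < n; i++) {
		if (recs[i].delalloc)
			continue;	/* no disk blocks allocated yet */
		*count += recs[i].blockcount;
		numrecs++;
	}
	return numrecs;
}

int main(void)
{
	struct irec	recs[] = {
		{ .blockcount = 8 },
		{ .blockcount = 4, .delalloc = true },	/* skipped */
		{ .blockcount = 16 },
	};
	uint64_t	count = 0;

	assert(count_leaves(recs, 3, &count) == 2);
	assert(count == 24);
	return 0;
}
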
91 
92 /*
93  * Count fsblocks of the given fork.  Delayed allocation extents are
94  * not counted towards the totals.
95  */
96 int
97 xfs_bmap_count_blocks(
98 	struct xfs_trans	*tp,
99 	struct xfs_inode	*ip,
100 	int			whichfork,
101 	xfs_extnum_t		*nextents,
102 	xfs_filblks_t		*count)
103 {
104 	struct xfs_mount	*mp = ip->i_mount;
105 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
106 	struct xfs_btree_cur	*cur;
107 	xfs_filblks_t		btblocks = 0;
108 	int			error;
109 
110 	*nextents = 0;
111 	*count = 0;
112 
113 	if (!ifp)
114 		return 0;
115 
116 	switch (ifp->if_format) {
117 	case XFS_DINODE_FMT_BTREE:
118 		error = xfs_iread_extents(tp, ip, whichfork);
119 		if (error)
120 			return error;
121 
122 		cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork);
123 		error = xfs_btree_count_blocks(cur, &btblocks);
124 		xfs_btree_del_cursor(cur, error);
125 		if (error)
126 			return error;
127 
128 		/*
129 		 * xfs_btree_count_blocks includes the root block contained in
130 		 * the inode fork in @btblocks, so subtract one because we're
131 		 * only interested in allocated disk blocks.
132 		 */
133 		*count += btblocks - 1;
134 
135 		fallthrough;
136 	case XFS_DINODE_FMT_EXTENTS:
137 		*nextents = xfs_bmap_count_leaves(ifp, count);
138 		break;
139 	}
140 
141 	return 0;
142 }
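
A hedged sketch of a typical caller: querying the data fork's allocated blocks and extent records outside of a transaction, as stat-style reporting code does. The example_ helper is illustrative, not part of this file; the caller is assumed to hold the inode's ILOCK.

/* Sketch only: a NULL transaction is fine for a read-only query. */
static int
example_count_data_fork(
	struct xfs_inode	*ip,
	xfs_filblks_t		*blocks)
{
	xfs_extnum_t		nextents;

	return xfs_bmap_count_blocks(NULL, ip, XFS_DATA_FORK, &nextents,
			blocks);
}
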
143 
144 static int
145 xfs_getbmap_report_one(
146 	struct xfs_inode	*ip,
147 	struct getbmapx		*bmv,
148 	struct kgetbmap		*out,
149 	int64_t			bmv_end,
150 	struct xfs_bmbt_irec	*got)
151 {
152 	struct kgetbmap		*p = out + bmv->bmv_entries;
153 	bool			shared = false;
154 	int			error;
155 
156 	error = xfs_reflink_trim_around_shared(ip, got, &shared);
157 	if (error)
158 		return error;
159 
160 	if (isnullstartblock(got->br_startblock) ||
161 	    got->br_startblock == DELAYSTARTBLOCK) {
162 		/*
163 		 * Take the flush completion as being a point-in-time snapshot
164 		 * where there are no delalloc extents, and if any new ones
165 		 * have been created racily, just skip them as being 'after'
166 		 * the flush, so they don't get reported.
167 		 */
168 		if (!(bmv->bmv_iflags & BMV_IF_DELALLOC))
169 			return 0;
170 
171 		p->bmv_oflags |= BMV_OF_DELALLOC;
172 		p->bmv_block = -2;
173 	} else {
174 		p->bmv_block = xfs_fsb_to_db(ip, got->br_startblock);
175 	}
176 
177 	if (got->br_state == XFS_EXT_UNWRITTEN &&
178 	    (bmv->bmv_iflags & BMV_IF_PREALLOC))
179 		p->bmv_oflags |= BMV_OF_PREALLOC;
180 
181 	if (shared)
182 		p->bmv_oflags |= BMV_OF_SHARED;
183 
184 	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, got->br_startoff);
185 	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, got->br_blockcount);
186 
187 	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
188 	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
189 	bmv->bmv_entries++;
190 	return 0;
191 }
192 
193 static void
194 xfs_getbmap_report_hole(
195 	struct xfs_inode	*ip,
196 	struct getbmapx		*bmv,
197 	struct kgetbmap		*out,
198 	int64_t			bmv_end,
199 	xfs_fileoff_t		bno,
200 	xfs_fileoff_t		end)
201 {
202 	struct kgetbmap		*p = out + bmv->bmv_entries;
203 
204 	if (bmv->bmv_iflags & BMV_IF_NO_HOLES)
205 		return;
206 
207 	p->bmv_block = -1;
208 	p->bmv_offset = XFS_FSB_TO_BB(ip->i_mount, bno);
209 	p->bmv_length = XFS_FSB_TO_BB(ip->i_mount, end - bno);
210 
211 	bmv->bmv_offset = p->bmv_offset + p->bmv_length;
212 	bmv->bmv_length = max(0LL, bmv_end - bmv->bmv_offset);
213 	bmv->bmv_entries++;
214 }
215 
216 static inline bool
217 xfs_getbmap_full(
218 	struct getbmapx		*bmv)
219 {
220 	return bmv->bmv_length == 0 || bmv->bmv_entries >= bmv->bmv_count - 1;
221 }
222 
223 static bool
224 xfs_getbmap_next_rec(
225 	struct xfs_bmbt_irec	*rec,
226 	xfs_fileoff_t		total_end)
227 {
228 	xfs_fileoff_t		end = rec->br_startoff + rec->br_blockcount;
229 
230 	if (end == total_end)
231 		return false;
232 
233 	rec->br_startoff += rec->br_blockcount;
234 	if (!isnullstartblock(rec->br_startblock) &&
235 	    rec->br_startblock != DELAYSTARTBLOCK)
236 		rec->br_startblock += rec->br_blockcount;
237 	rec->br_blockcount = total_end - end;
238 	return true;
239 }
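
xfs_getbmap() (below) walks a single bmbt record in pieces: xfs_reflink_trim_around_shared() may shrink the record to its leading shared or unshared part before it is reported, and xfs_getbmap_next_rec() then advances the record past the reported piece. A standalone model of that advance with concrete numbers; field names mirror struct xfs_bmbt_irec, and the delalloc special cases are dropped for brevity.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct rec {
	uint64_t	startoff;
	uint64_t	startblock;
	uint64_t	blockcount;
};

/* Model of xfs_getbmap_next_rec() for real (non-delalloc) extents. */
static bool next_rec(struct rec *rec, uint64_t total_end)
{
	uint64_t	end = rec->startoff + rec->blockcount;

	if (end == total_end)
		return false;		/* whole record reported */

	rec->startoff += rec->blockcount;
	rec->startblock += rec->blockcount;
	rec->blockcount = total_end - end;
	return true;
}

int main(void)
{
	/* One 50-block record whose first 20 blocks are shared. */
	struct rec	rec = { .startoff = 100, .startblock = 5000,
				.blockcount = 50 };

	rec.blockcount = 20;		/* trimmed to the shared prefix */
	assert(next_rec(&rec, 150));
	assert(rec.startoff == 120 && rec.startblock == 5020);
	assert(rec.blockcount == 30);	/* remaining unshared tail */
	assert(!next_rec(&rec, 150));	/* nothing left after that */
	return 0;
}
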
240 
241 /*
242  * Get inode's extents as described in bmv, and format for output.
243  * Calls formatter to fill the user's buffer until all extents
244  * are mapped, until the passed-in bmv->bmv_count slots have
245  * been filled, or until the formatter short-circuits the loop,
246  * if it is tracking filled-in extents on its own.
247  */
248 int						/* error code */
249 xfs_getbmap(
250 	struct xfs_inode	*ip,
251 	struct getbmapx		*bmv,		/* user bmap structure */
252 	struct kgetbmap		*out)
253 {
254 	struct xfs_mount	*mp = ip->i_mount;
255 	int			iflags = bmv->bmv_iflags;
256 	int			whichfork, lock, error = 0;
257 	int64_t			bmv_end, max_len;
258 	xfs_fileoff_t		bno, first_bno;
259 	struct xfs_ifork	*ifp;
260 	struct xfs_bmbt_irec	got, rec;
261 	xfs_filblks_t		len;
262 	struct xfs_iext_cursor	icur;
263 
264 	if (bmv->bmv_iflags & ~BMV_IF_VALID)
265 		return -EINVAL;
266 #ifndef DEBUG
267 	/* Only allow CoW fork queries if we're debugging. */
268 	if (iflags & BMV_IF_COWFORK)
269 		return -EINVAL;
270 #endif
271 	if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
272 		return -EINVAL;
273 
274 	if (bmv->bmv_length < -1)
275 		return -EINVAL;
276 	bmv->bmv_entries = 0;
277 	if (bmv->bmv_length == 0)
278 		return 0;
279 
280 	if (iflags & BMV_IF_ATTRFORK)
281 		whichfork = XFS_ATTR_FORK;
282 	else if (iflags & BMV_IF_COWFORK)
283 		whichfork = XFS_COW_FORK;
284 	else
285 		whichfork = XFS_DATA_FORK;
286 
287 	xfs_ilock(ip, XFS_IOLOCK_SHARED);
288 	switch (whichfork) {
289 	case XFS_ATTR_FORK:
290 		lock = xfs_ilock_attr_map_shared(ip);
291 		if (!xfs_inode_has_attr_fork(ip))
292 			goto out_unlock_ilock;
293 
294 		max_len = 1LL << 32;
295 		break;
296 	case XFS_COW_FORK:
297 		lock = XFS_ILOCK_SHARED;
298 		xfs_ilock(ip, lock);
299 
300 		/* No CoW fork? Just return */
301 		if (!xfs_ifork_ptr(ip, whichfork))
302 			goto out_unlock_ilock;
303 
304 		if (xfs_get_cowextsz_hint(ip))
305 			max_len = mp->m_super->s_maxbytes;
306 		else
307 			max_len = XFS_ISIZE(ip);
308 		break;
309 	case XFS_DATA_FORK:
310 		if (!(iflags & BMV_IF_DELALLOC) &&
311 		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_disk_size)) {
312 			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
313 			if (error)
314 				goto out_unlock_iolock;
315 
316 			/*
317 			 * Even after flushing the inode, there can still be
318 			 * delalloc blocks on the inode beyond EOF due to
319 			 * speculative preallocation.  These are not removed
320 			 * until the release function is called or the inode
321 			 * is inactivated.  Hence we cannot assert here that
322 			 * ip->i_delayed_blks == 0.
323 			 */
324 		}
325 
326 		if (xfs_get_extsz_hint(ip) ||
327 		    (ip->i_diflags & XFS_DIFLAG_PREALLOC))
328 			max_len = mp->m_super->s_maxbytes;
329 		else
330 			max_len = XFS_ISIZE(ip);
331 
332 		lock = xfs_ilock_data_map_shared(ip);
333 		break;
334 	}
335 
336 	ifp = xfs_ifork_ptr(ip, whichfork);
337 
338 	switch (ifp->if_format) {
339 	case XFS_DINODE_FMT_EXTENTS:
340 	case XFS_DINODE_FMT_BTREE:
341 		break;
342 	case XFS_DINODE_FMT_LOCAL:
343 		/* Local format inode forks report no extents. */
344 		goto out_unlock_ilock;
345 	default:
346 		error = -EINVAL;
347 		goto out_unlock_ilock;
348 	}
349 
350 	if (bmv->bmv_length == -1) {
351 		max_len = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, max_len));
352 		bmv->bmv_length = max(0LL, max_len - bmv->bmv_offset);
353 	}
354 
355 	bmv_end = bmv->bmv_offset + bmv->bmv_length;
356 
357 	first_bno = bno = XFS_BB_TO_FSBT(mp, bmv->bmv_offset);
358 	len = XFS_BB_TO_FSB(mp, bmv->bmv_length);
359 
360 	error = xfs_iread_extents(NULL, ip, whichfork);
361 	if (error)
362 		goto out_unlock_ilock;
363 
364 	if (!xfs_iext_lookup_extent(ip, ifp, bno, &icur, &got)) {
365 		/*
366 		 * Report a whole-file hole if the delalloc flag is set to
367 		 * stay compatible with the old implementation.
368 		 */
369 		if (iflags & BMV_IF_DELALLOC)
370 			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
371 					XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
372 		goto out_unlock_ilock;
373 	}
374 
375 	while (!xfs_getbmap_full(bmv)) {
376 		xfs_trim_extent(&got, first_bno, len);
377 
378 		/*
379 		 * Report an entry for a hole if this extent doesn't directly
380 		 * follow the previous one.
381 		 */
382 		if (got.br_startoff > bno) {
383 			xfs_getbmap_report_hole(ip, bmv, out, bmv_end, bno,
384 					got.br_startoff);
385 			if (xfs_getbmap_full(bmv))
386 				break;
387 		}
388 
389 		/*
390 		 * In order to report shared extents accurately, we report each
391 		 * distinct shared / unshared part of a single bmbt record with
392 		 * an individual getbmapx record.
393 		 */
394 		bno = got.br_startoff + got.br_blockcount;
395 		rec = got;
396 		do {
397 			error = xfs_getbmap_report_one(ip, bmv, out, bmv_end,
398 					&rec);
399 			if (error || xfs_getbmap_full(bmv))
400 				goto out_unlock_ilock;
401 		} while (xfs_getbmap_next_rec(&rec, bno));
402 
403 		if (!xfs_iext_next_extent(ifp, &icur, &got)) {
404 			xfs_fileoff_t	end = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
405 
406 			if (bmv->bmv_entries > 0)
407 				out[bmv->bmv_entries - 1].bmv_oflags |=
408 								BMV_OF_LAST;
409 
410 			if (whichfork != XFS_ATTR_FORK && bno < end &&
411 			    !xfs_getbmap_full(bmv)) {
412 				xfs_getbmap_report_hole(ip, bmv, out, bmv_end,
413 						bno, end);
414 			}
415 			break;
416 		}
417 
418 		if (bno >= first_bno + len)
419 			break;
420 	}
421 
422 out_unlock_ilock:
423 	xfs_iunlock(ip, lock);
424 out_unlock_iolock:
425 	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
426 	return error;
427 }
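
A rough sketch of how the ioctl layer drives this function; the real XFS_IOC_GETBMAPX handler lives in xfs_ioctl.c, so treat the shape below as illustrative. The caller sizes an output array from bmv_count, lets xfs_getbmap() fill it, and copies bmv_entries records back to userspace.

/* Illustrative only -- modeled loosely on the getbmap ioctl path. */
static int
example_getbmap(
	struct xfs_inode	*ip,
	struct getbmapx		*bmx)
{
	struct kgetbmap		*buf;
	int			error;

	if (bmx->bmv_count < 2)
		return -EINVAL;

	buf = kvcalloc(bmx->bmv_count, sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	error = xfs_getbmap(ip, bmx, buf);
	/* ... copy bmx->bmv_entries records out to userspace ... */
	kvfree(buf);
	return error;
}
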
428 
429 /*
430  * Dead simple method of punching delayed allocation blocks from a range in
431  * the inode.  This will always punch out both the start and end blocks, even
432  * if the range only partially overlaps them, so it is up to the caller to
433  * ensure that partial blocks are not passed in.
434  */
435 void
436 xfs_bmap_punch_delalloc_range(
437 	struct xfs_inode	*ip,
438 	int			whichfork,
439 	xfs_off_t		start_byte,
440 	xfs_off_t		end_byte,
441 	struct xfs_zone_alloc_ctx *ac)
442 {
443 	struct xfs_mount	*mp = ip->i_mount;
444 	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
445 	xfs_fileoff_t		start_fsb = XFS_B_TO_FSBT(mp, start_byte);
446 	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, end_byte);
447 	struct xfs_bmbt_irec	got, del;
448 	struct xfs_iext_cursor	icur;
449 
450 	ASSERT(!xfs_need_iread_extents(ifp));
451 
452 	xfs_ilock(ip, XFS_ILOCK_EXCL);
453 	if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
454 		goto out_unlock;
455 
456 	while (got.br_startoff + got.br_blockcount > start_fsb) {
457 		del = got;
458 		xfs_trim_extent(&del, start_fsb, end_fsb - start_fsb);
459 
460 		/*
461 		 * A delete can push the cursor forward. Step back to the
462 		 * previous extent for non-delalloc extents, or for extents
463 		 * outside the target range.
464 		 */
465 		if (!del.br_blockcount ||
466 		    !isnullstartblock(del.br_startblock)) {
467 			if (!xfs_iext_prev_extent(ifp, &icur, &got))
468 				break;
469 			continue;
470 		}
471 
472 		if (xfs_is_zoned_inode(ip) && ac) {
473 			/*
474 			 * In a zoned buffered write context we need to return
475 			 * the punched delalloc allocations to the allocation
476 			 * context.  This allows reusing them in the following
477 			 * iomap iterations.
478 			 */
479 			xfs_bmap_del_extent_delay(ip, whichfork, &icur, &got,
480 					&del, XFS_BMAPI_REMAP);
481 			ac->reserved_blocks += del.br_blockcount;
482 		} else {
483 			xfs_bmap_del_extent_delay(ip, whichfork, &icur, &got,
484 					&del, 0);
485 		}
486 
487 		if (!xfs_iext_get_extent(ifp, &icur, &got))
488 			break;
489 	}
490 
491 	if (whichfork == XFS_COW_FORK && !ifp->if_bytes)
492 		xfs_inode_clear_cowblocks_tag(ip);
493 
494 out_unlock:
495 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
496 }
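
A hedged sketch of the classic use of this routine: releasing the delalloc reservations left behind when a buffered write fails before writeback. The helper name is made up for illustration; per the comment above, the range is expected to cover whole blocks, and a non-zoned filesystem passes a NULL allocation context.

/* Sketch: drop delalloc reservations over a failed write range. */
static void
example_punch_failed_write(
	struct xfs_inode	*ip,
	xfs_off_t		start_byte,
	xfs_off_t		end_byte)
{
	xfs_bmap_punch_delalloc_range(ip, XFS_DATA_FORK, start_byte,
			end_byte, NULL);
}
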
497 
498 /*
499  * Test whether it is appropriate to check an inode for, and free, post-EOF
500  * blocks.
501  */
502 bool
503 xfs_can_free_eofblocks(
504 	struct xfs_inode	*ip)
505 {
506 	struct xfs_mount	*mp = ip->i_mount;
507 	bool			found_blocks = false;
508 	xfs_fileoff_t		end_fsb;
509 	xfs_fileoff_t		last_fsb;
510 	struct xfs_bmbt_irec	imap;
511 	struct xfs_iext_cursor	icur;
512 
513 	/*
514 	 * Caller must either hold the exclusive IO lock, or be inactivating
515 	 * the inode, which guarantees there are no other users of the inode.
516 	 */
517 	if (!(VFS_I(ip)->i_state & I_FREEING))
518 		xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL);
519 
520 	/* prealloc/delalloc exists only on regular files */
521 	if (!S_ISREG(VFS_I(ip)->i_mode))
522 		return false;
523 
524 	/*
525 	 * Zero-sized files with no cached pages and no delalloc blocks will not
526 	 * have speculative prealloc/delalloc blocks to remove.
527 	 */
528 	if (VFS_I(ip)->i_size == 0 &&
529 	    VFS_I(ip)->i_mapping->nrpages == 0 &&
530 	    ip->i_delayed_blks == 0)
531 		return false;
532 
533 	/* If we haven't read in the extent list, then don't do it now. */
534 	if (xfs_need_iread_extents(&ip->i_df))
535 		return false;
536 
537 	/*
538 	 * Do not free real extents in preallocated files unless the file has
539 	 * delalloc blocks and we are forced to remove them.
540 	 */
541 	if ((ip->i_diflags & XFS_DIFLAG_PREALLOC) && !ip->i_delayed_blks)
542 		return false;
543 
544 	/*
545 	 * Do not try to free post-EOF blocks if EOF is beyond the end of the
546 	 * range supported by the page cache, because the truncation will loop
547 	 * forever.
548 	 */
549 	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
550 	if (xfs_inode_has_bigrtalloc(ip))
551 		end_fsb = xfs_fileoff_roundup_rtx(mp, end_fsb);
552 	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
553 	if (last_fsb <= end_fsb)
554 		return false;
555 
556 	/*
557 	 * Check if there is a post-EOF extent to free.  If there are any
558 	 * delalloc blocks attached to the inode (data fork delalloc
559 	 * reservations or CoW extents of any kind), we need to free them so
560 	 * that inactivation doesn't fail to erase them.
561 	 */
562 	xfs_ilock(ip, XFS_ILOCK_SHARED);
563 	if (ip->i_delayed_blks ||
564 	    xfs_iext_lookup_extent(ip, &ip->i_df, end_fsb, &icur, &imap))
565 		found_blocks = true;
566 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
567 	return found_blocks;
568 }
569 
570 /*
571  * This is called to free any blocks beyond eof. The caller must hold
572  * IOLOCK_EXCL unless we are in the inode reclaim path and have the only
573  * reference to the inode.
574  */
575 int
576 xfs_free_eofblocks(
577 	struct xfs_inode	*ip)
578 {
579 	struct xfs_trans	*tp;
580 	struct xfs_mount	*mp = ip->i_mount;
581 	int			error;
582 
583 	/* Attach the dquots to the inode up front. */
584 	error = xfs_qm_dqattach(ip);
585 	if (error)
586 		return error;
587 
588 	/* Wait on dio to ensure i_size has settled. */
589 	inode_dio_wait(VFS_I(ip));
590 
591 	/*
592 	 * For preallocated files, only free delayed allocations.
593 	 *
594 	 * Note that this means we also leave speculative preallocations in
595 	 * place for preallocated files.
596 	 */
597 	if (ip->i_diflags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) {
598 		if (ip->i_delayed_blks) {
599 			xfs_bmap_punch_delalloc_range(ip, XFS_DATA_FORK,
600 				round_up(XFS_ISIZE(ip), mp->m_sb.sb_blocksize),
601 				LLONG_MAX, NULL);
602 		}
603 		xfs_inode_clear_eofblocks_tag(ip);
604 		return 0;
605 	}
606 
607 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
608 	if (error) {
609 		ASSERT(xfs_is_shutdown(mp));
610 		return error;
611 	}
612 
613 	xfs_ilock(ip, XFS_ILOCK_EXCL);
614 	xfs_trans_ijoin(tp, ip, 0);
615 
616 	/*
617 	 * Do not update the on-disk file size.  If we update the on-disk file
618 	 * size and then the system crashes before the contents of the file are
619 	 * flushed to disk then the files may be full of holes (i.e. the NULL
620 	 * files bug).
621 	 */
622 	error = xfs_itruncate_extents_flags(&tp, ip, XFS_DATA_FORK,
623 				XFS_ISIZE(ip), XFS_BMAPI_NODISCARD);
624 	if (error)
625 		goto err_cancel;
626 
627 	error = xfs_trans_commit(tp);
628 	if (error)
629 		goto out_unlock;
630 
631 	xfs_inode_clear_eofblocks_tag(ip);
632 	goto out_unlock;
633 
634 err_cancel:
635 	/*
636 	 * If we get an error at this point we simply don't
637 	 * bother truncating the file.
638 	 */
639 	xfs_trans_cancel(tp);
640 out_unlock:
641 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
642 	return error;
643 }
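
The two routines above are meant to be used as a pair, as the release and inactivation paths do: the cheap xfs_can_free_eofblocks() check filters out inodes with nothing to trim before the transactional xfs_free_eofblocks() is attempted. A minimal sketch of that pattern; the helper is illustrative and assumes the caller holds XFS_IOLOCK_EXCL as required above.

/* Sketch: trim speculative post-EOF preallocation if there is any. */
static int
example_trim_eofblocks(
	struct xfs_inode	*ip)
{
	if (!xfs_can_free_eofblocks(ip))
		return 0;
	return xfs_free_eofblocks(ip);
}
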
644 
645 int
646 xfs_alloc_file_space(
647 	struct xfs_inode	*ip,
648 	xfs_off_t		offset,
649 	xfs_off_t		len)
650 {
651 	xfs_mount_t		*mp = ip->i_mount;
652 	xfs_off_t		count;
653 	xfs_filblks_t		allocatesize_fsb;
654 	xfs_extlen_t		extsz, temp;
655 	xfs_fileoff_t		startoffset_fsb;
656 	xfs_fileoff_t		endoffset_fsb;
657 	int			rt;
658 	xfs_trans_t		*tp;
659 	xfs_bmbt_irec_t		imaps[1], *imapp;
660 	int			error;
661 
662 	if (xfs_is_always_cow_inode(ip))
663 		return 0;
664 
665 	trace_xfs_alloc_file_space(ip);
666 
667 	if (xfs_is_shutdown(mp))
668 		return -EIO;
669 
670 	error = xfs_qm_dqattach(ip);
671 	if (error)
672 		return error;
673 
674 	if (len <= 0)
675 		return -EINVAL;
676 
677 	rt = XFS_IS_REALTIME_INODE(ip);
678 	extsz = xfs_get_extsz_hint(ip);
679 
680 	count = len;
681 	imapp = &imaps[0];
682 	startoffset_fsb	= XFS_B_TO_FSBT(mp, offset);
683 	endoffset_fsb = XFS_B_TO_FSB(mp, offset + count);
684 	allocatesize_fsb = endoffset_fsb - startoffset_fsb;
685 
686 	/*
687 	 * Allocate file space until done or until there is an error
688 	 */
689 	while (allocatesize_fsb && !error) {
690 		xfs_fileoff_t	s, e;
691 		unsigned int	dblocks, rblocks, resblks;
692 		int		nimaps = 1;
693 
694 		/*
695 		 * Determine space reservations for data/realtime.
696 		 */
697 		if (unlikely(extsz)) {
698 			s = startoffset_fsb;
699 			do_div(s, extsz);
700 			s *= extsz;
701 			e = startoffset_fsb + allocatesize_fsb;
702 			div_u64_rem(startoffset_fsb, extsz, &temp);
703 			if (temp)
704 				e += temp;
705 			div_u64_rem(e, extsz, &temp);
706 			if (temp)
707 				e += extsz - temp;
708 		} else {
709 			s = 0;
710 			e = allocatesize_fsb;
711 		}
712 
713 		/*
714 		 * The transaction reservation is limited to a 32-bit block
715 		 * count, hence we need to limit the number of blocks we are
716 		 * trying to reserve to avoid an overflow. We can't allocate
717 		 * more than @nimaps extents, and an extent is limited on disk
718 		 * to XFS_BMBT_MAX_EXTLEN (21 bits), so use that to enforce the
719 		 * limit.
720 		 */
721 		resblks = min_t(xfs_fileoff_t, (e - s),
722 				(XFS_MAX_BMBT_EXTLEN * nimaps));
723 		if (unlikely(rt)) {
724 			dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
725 			rblocks = resblks;
726 		} else {
727 			dblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
728 			rblocks = 0;
729 		}
730 
731 		error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write,
732 				dblocks, rblocks, false, &tp);
733 		if (error)
734 			break;
735 
736 		error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK,
737 				XFS_IEXT_ADD_NOSPLIT_CNT);
738 		if (error)
739 			goto error;
740 
741 		/*
742 		 * If the allocator cannot find a single free extent large
743 		 * enough to cover the start block of the requested range,
744 		 * xfs_bmapi_write will return -ENOSR.
745 		 *
746 		 * In that case we simply need to keep looping with the same
747 		 * startoffset_fsb so that one of the following allocations
748 		 * will eventually reach the requested range.
749 		 */
750 		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
751 				allocatesize_fsb, XFS_BMAPI_PREALLOC, 0, imapp,
752 				&nimaps);
753 		if (error) {
754 			if (error != -ENOSR)
755 				goto error;
756 			error = 0;
757 		} else {
758 			startoffset_fsb += imapp->br_blockcount;
759 			allocatesize_fsb -= imapp->br_blockcount;
760 		}
761 
762 		ip->i_diflags |= XFS_DIFLAG_PREALLOC;
763 		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
764 
765 		error = xfs_trans_commit(tp);
766 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
767 	}
768 
769 	return error;
770 
771 error:
772 	xfs_trans_cancel(tp);
773 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
774 	return error;
775 }
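
The reservation sizing above widens the requested range out to extent size hint boundaries so the worst-case allocation is covered (the real code additionally caps the result at XFS_MAX_BMBT_EXTLEN per mapping). A standalone model of that rounding with concrete numbers; a 16-block hint is assumed for illustration.

#include <assert.h>
#include <stdint.h>

/* Model of the extsz rounding in xfs_alloc_file_space() above. */
static uint64_t reservation_blocks(uint64_t start, uint64_t len,
		uint64_t extsz)
{
	uint64_t	s, e;

	if (!extsz)
		return len;

	s = start - (start % extsz);		/* round start down */
	e = start + len;
	if (start % extsz)
		e += start % extsz;
	if (e % extsz)
		e += extsz - (e % extsz);	/* round end up */
	return e - s;
}

int main(void)
{
	/*
	 * 20 blocks at block offset 10 with a 16-block hint: the start
	 * rounds down to 0 and the end widens to 48, so 48 blocks are
	 * reserved even though only 20 were asked for.
	 */
	assert(reservation_blocks(10, 20, 16) == 48);
	assert(reservation_blocks(0, 32, 16) == 32);	/* aligned already */
	assert(reservation_blocks(5, 7, 0) == 7);	/* no hint */
	return 0;
}
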
776 
777 static int
778 xfs_unmap_extent(
779 	struct xfs_inode	*ip,
780 	xfs_fileoff_t		startoffset_fsb,
781 	xfs_filblks_t		len_fsb,
782 	int			*done)
783 {
784 	struct xfs_mount	*mp = ip->i_mount;
785 	struct xfs_trans	*tp;
786 	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
787 	int			error;
788 
789 	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks, 0,
790 			false, &tp);
791 	if (error)
792 		return error;
793 
794 	error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK,
795 			XFS_IEXT_PUNCH_HOLE_CNT);
796 	if (error)
797 		goto out_trans_cancel;
798 
799 	error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, done);
800 	if (error)
801 		goto out_trans_cancel;
802 
803 	error = xfs_trans_commit(tp);
804 out_unlock:
805 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
806 	return error;
807 
808 out_trans_cancel:
809 	xfs_trans_cancel(tp);
810 	goto out_unlock;
811 }
812 
813 /* Caller must first wait for the completion of any pending DIOs if required. */
814 int
815 xfs_flush_unmap_range(
816 	struct xfs_inode	*ip,
817 	xfs_off_t		offset,
818 	xfs_off_t		len)
819 {
820 	struct inode		*inode = VFS_I(ip);
821 	xfs_off_t		rounding, start, end;
822 	int			error;
823 
824 	/*
825 	 * Make sure we extend the flush out to extent alignment
826 	 * boundaries so any extent range overlapping the start/end
827 	 * of the modification we are about to do is clean and idle.
828 	 */
829 	rounding = max_t(xfs_off_t, xfs_inode_alloc_unitsize(ip), PAGE_SIZE);
830 	start = rounddown_64(offset, rounding);
831 	end = roundup_64(offset + len, rounding) - 1;
832 
833 	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
834 	if (error)
835 		return error;
836 	truncate_pagecache_range(inode, start, end);
837 	return 0;
838 }
839 
840 int
841 xfs_free_file_space(
842 	struct xfs_inode	*ip,
843 	xfs_off_t		offset,
844 	xfs_off_t		len,
845 	struct xfs_zone_alloc_ctx *ac)
846 {
847 	struct xfs_mount	*mp = ip->i_mount;
848 	xfs_fileoff_t		startoffset_fsb;
849 	xfs_fileoff_t		endoffset_fsb;
850 	int			done = 0, error;
851 
852 	trace_xfs_free_file_space(ip);
853 
854 	error = xfs_qm_dqattach(ip);
855 	if (error)
856 		return error;
857 
858 	if (len <= 0)	/* if nothing being freed */
859 		return 0;
860 
861 	/*
862 	 * Now that AIO and DIO have drained, we flush and (if necessary)
863 	 * invalidate the cached range over the first operation we are about to run.
864 	 */
865 	error = xfs_flush_unmap_range(ip, offset, len);
866 	if (error)
867 		return error;
868 
869 	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
870 	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
871 
872 	/* We can only free complete realtime extents. */
873 	if (xfs_inode_has_bigrtalloc(ip)) {
874 		startoffset_fsb = xfs_fileoff_roundup_rtx(mp, startoffset_fsb);
875 		endoffset_fsb = xfs_fileoff_rounddown_rtx(mp, endoffset_fsb);
876 	}
877 
878 	/*
879 	 * Need to zero the stuff we're not freeing, on disk.
880 	 */
881 	if (endoffset_fsb > startoffset_fsb) {
882 		while (!done) {
883 			error = xfs_unmap_extent(ip, startoffset_fsb,
884 					endoffset_fsb - startoffset_fsb, &done);
885 			if (error)
886 				return error;
887 		}
888 	}
889 
890 	/*
891 	 * Now that we've unmapped all full blocks we'll have to zero out any
892 	 * partial block at the beginning and/or end.  xfs_zero_range is smart
893 	 * enough to skip any holes, including those we just created, but we
894 	 * must take care not to zero beyond EOF and enlarge i_size.
895 	 */
896 	if (offset >= XFS_ISIZE(ip))
897 		return 0;
898 	if (offset + len > XFS_ISIZE(ip))
899 		len = XFS_ISIZE(ip) - offset;
900 	error = xfs_zero_range(ip, offset, len, ac, NULL);
901 	if (error)
902 		return error;
903 
904 	/*
905 	 * If we zeroed right up to EOF and EOF straddles a page boundary we
906 	 * must make sure that the post-EOF area is also zeroed because the
907 	 * page could be mmap'd and xfs_zero_range doesn't do that for us.
908 	 * Writeback of the eof page will do this, albeit clumsily.
909 	 */
910 	if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) {
911 		error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
912 				round_down(offset + len, PAGE_SIZE), LLONG_MAX);
913 	}
914 
915 	return error;
916 }
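
A hedged sketch of how a hole punch reaches this routine; the real dispatch is the fallocate handler in xfs_file.c, so the helper below is only indicative. The caller is assumed to hold the IO and mmap locks exclusively, and a non-zoned filesystem passes a NULL allocation context.

/* Sketch only: punch a hole over [offset, offset + len). */
static int
example_punch_hole(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	return xfs_free_file_space(ip, offset, len, NULL);
}
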
917 
918 static int
919 xfs_prepare_shift(
920 	struct xfs_inode	*ip,
921 	loff_t			offset)
922 {
923 	unsigned int		rounding;
924 	int			error;
925 
926 	/*
927 	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
928 	 * into the accessible region of the file.
929 	 */
930 	if (xfs_can_free_eofblocks(ip)) {
931 		error = xfs_free_eofblocks(ip);
932 		if (error)
933 			return error;
934 	}
935 
936 	/*
937 	 * Shift operations must stabilize the start block offset boundary along
938 	 * with the full range of the operation. If we don't, a COW writeback
939 	 * completion could race with an insert, front merge with the start
940 	 * extent (after split) during the shift and corrupt the file. Start
941 	 * with the allocation unit just prior to the start to stabilize the
942 	 * boundary.
943 	 */
944 	rounding = xfs_inode_alloc_unitsize(ip);
945 	offset = rounddown_64(offset, rounding);
946 	if (offset)
947 		offset -= rounding;
948 
949 	/*
950 	 * Writeback and invalidate cache for the remainder of the file as we're
951 	 * about to shift down every extent from offset to EOF.
952 	 */
953 	error = xfs_flush_unmap_range(ip, offset, XFS_ISIZE(ip));
954 	if (error)
955 		return error;
956 
957 	/*
958 	 * Clean out anything hanging around in the cow fork now that
959 	 * we've flushed all the dirty data out to disk to avoid having
960 	 * CoW extents at the wrong offsets.
961 	 */
962 	if (xfs_inode_has_cow_data(ip)) {
963 		error = xfs_reflink_cancel_cow_range(ip, offset, NULLFILEOFF,
964 				true);
965 		if (error)
966 			return error;
967 	}
968 
969 	return 0;
970 }
971 
972 /*
973  * xfs_collapse_file_space()
974  *	This routine frees disk space and shifts extents for the given file.
975  *	The first thing we do is free data blocks in the specified range by
976  *	calling xfs_free_file_space(), which also syncs dirty data and
977  *	invalidates the page cache over the region on which the collapse is
978  *	working.  Extent records are then shifted left to cover the hole.
979  * RETURNS:
980  *	0 on success
981  *	errno on error
982  *
983  */
984 int
985 xfs_collapse_file_space(
986 	struct xfs_inode	*ip,
987 	xfs_off_t		offset,
988 	xfs_off_t		len,
989 	struct xfs_zone_alloc_ctx *ac)
990 {
991 	struct xfs_mount	*mp = ip->i_mount;
992 	struct xfs_trans	*tp;
993 	int			error;
994 	xfs_fileoff_t		next_fsb = XFS_B_TO_FSB(mp, offset + len);
995 	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
996 	bool			done = false;
997 
998 	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
999 
1000 	trace_xfs_collapse_file_space(ip);
1001 
1002 	error = xfs_free_file_space(ip, offset, len, ac);
1003 	if (error)
1004 		return error;
1005 
1006 	error = xfs_prepare_shift(ip, offset);
1007 	if (error)
1008 		return error;
1009 
1010 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
1011 	if (error)
1012 		return error;
1013 
1014 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1015 	xfs_trans_ijoin(tp, ip, 0);
1016 
1017 	while (!done) {
1018 		error = xfs_bmap_collapse_extents(tp, ip, &next_fsb, shift_fsb,
1019 				&done);
1020 		if (error)
1021 			goto out_trans_cancel;
1022 		if (done)
1023 			break;
1024 
1025 		/* finish any deferred frees and roll the transaction */
1026 		error = xfs_defer_finish(&tp);
1027 		if (error)
1028 			goto out_trans_cancel;
1029 	}
1030 
1031 	error = xfs_trans_commit(tp);
1032 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1033 	return error;
1034 
1035 out_trans_cancel:
1036 	xfs_trans_cancel(tp);
1037 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1038 	return error;
1039 }
1040 
1041 /*
1042  * xfs_insert_file_space()
1043  *	This routine creates hole space by shifting extents for the given file.
1044  *	The first thing we do is sync dirty data and invalidate the page cache
1045  *	over the region on which the insert range is working.  We then split
1046  *	the extent at the given offset into two by calling
1047  *	xfs_bmap_split_extent, and shift all extent records lying between
1048  *	[offset, last allocated extent] to the right to create the hole.
1049  * RETURNS:
1050  *	0 on success
1051  *	errno on error
1052  */
1053 int
1054 xfs_insert_file_space(
1055 	struct xfs_inode	*ip,
1056 	loff_t			offset,
1057 	loff_t			len)
1058 {
1059 	struct xfs_mount	*mp = ip->i_mount;
1060 	struct xfs_trans	*tp;
1061 	int			error;
1062 	xfs_fileoff_t		stop_fsb = XFS_B_TO_FSB(mp, offset);
1063 	xfs_fileoff_t		next_fsb = NULLFSBLOCK;
1064 	xfs_fileoff_t		shift_fsb = XFS_B_TO_FSB(mp, len);
1065 	bool			done = false;
1066 
1067 	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
1068 
1069 	trace_xfs_insert_file_space(ip);
1070 
1071 	error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
1072 	if (error)
1073 		return error;
1074 
1075 	error = xfs_prepare_shift(ip, offset);
1076 	if (error)
1077 		return error;
1078 
1079 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
1080 			XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
1081 	if (error)
1082 		return error;
1083 
1084 	xfs_ilock(ip, XFS_ILOCK_EXCL);
1085 	xfs_trans_ijoin(tp, ip, 0);
1086 
1087 	error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK,
1088 			XFS_IEXT_PUNCH_HOLE_CNT);
1089 	if (error)
1090 		goto out_trans_cancel;
1091 
1092 	/*
1093 	 * The extent shifting code works on extent granularity. So, if stop_fsb
1094 	 * is not the starting block of an extent, we need to split the extent at
1095 	 * stop_fsb.
1096 	 */
1097 	error = xfs_bmap_split_extent(tp, ip, stop_fsb);
1098 	if (error)
1099 		goto out_trans_cancel;
1100 
1101 	do {
1102 		error = xfs_defer_finish(&tp);
1103 		if (error)
1104 			goto out_trans_cancel;
1105 
1106 		error = xfs_bmap_insert_extents(tp, ip, &next_fsb, shift_fsb,
1107 				&done, stop_fsb);
1108 		if (error)
1109 			goto out_trans_cancel;
1110 	} while (!done);
1111 
1112 	error = xfs_trans_commit(tp);
1113 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1114 	return error;
1115 
1116 out_trans_cancel:
1117 	xfs_trans_cancel(tp);
1118 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1119 	return error;
1120 }
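
Collapse and insert are mirror operations and are driven the same way from fallocate: both require the range to be aligned to the file's allocation unit, collapse requires offset + len to stay below EOF, and insert requires offset to be below EOF. A hedged dispatch sketch, modeled loosely on the handler in xfs_file.c; the checks above are assumed to have already passed.

/* Illustrative dispatch between the two shift operations above. */
static int
example_shift_range(
	struct xfs_inode	*ip,
	loff_t			offset,
	loff_t			len,
	bool			collapse,
	struct xfs_zone_alloc_ctx *ac)
{
	if (collapse)
		return xfs_collapse_file_space(ip, offset, len, ac);
	return xfs_insert_file_space(ip, offset, len);
}
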
1121 
1122 /*
1123  * We need to check that the format of the data fork in the temporary inode is
1124  * valid for the target inode before doing the swap. This is not a problem with
1125  * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
1126  * data fork depending on the space the attribute fork is taking so we can get
1127  * invalid formats on the target inode.
1128  *
1129  * E.g. target has space for 7 extents in extent format, temp inode only has
1130  * space for 6.  If we defragment down to 7 extents, then the tmp format is a
1131  * btree, but when swapped it needs to be in extent format. Hence we can't just
1132  * blindly swap data forks on attr2 filesystems.
1133  *
1134  * Note that we check the swap in both directions so that we don't end up with
1135  * a corrupt temporary inode, either.
1136  *
1137  * Note that fixing the way xfs_fsr sets up the attribute fork in the source
1138  * inode will prevent this situation from occurring, so all we do here is
1139  * reject and log the attempt.  Basically we are putting the responsibility on
1140  * userspace to get this right.
1141  */
1142 static int
1143 xfs_swap_extents_check_format(
1144 	struct xfs_inode	*ip,	/* target inode */
1145 	struct xfs_inode	*tip)	/* tmp inode */
1146 {
1147 	struct xfs_ifork	*ifp = &ip->i_df;
1148 	struct xfs_ifork	*tifp = &tip->i_df;
1149 
1150 	/* User/group/project quota ids must match if quotas are enforced. */
1151 	if (XFS_IS_QUOTA_ON(ip->i_mount) &&
1152 	    (!uid_eq(VFS_I(ip)->i_uid, VFS_I(tip)->i_uid) ||
1153 	     !gid_eq(VFS_I(ip)->i_gid, VFS_I(tip)->i_gid) ||
1154 	     ip->i_projid != tip->i_projid))
1155 		return -EINVAL;
1156 
1157 	/* Should never get a local format */
1158 	if (ifp->if_format == XFS_DINODE_FMT_LOCAL ||
1159 	    tifp->if_format == XFS_DINODE_FMT_LOCAL)
1160 		return -EINVAL;
1161 
1162 	/*
1163 	 * If the target inode has fewer extents than the temporary inode, then
1164 	 * why did userspace call us?
1165 	 */
1166 	if (ifp->if_nextents < tifp->if_nextents)
1167 		return -EINVAL;
1168 
1169 	/*
1170 	 * If we have to use the (expensive) rmap swap method, we can
1171 	 * handle any number of extents and any format.
1172 	 */
1173 	if (xfs_has_rmapbt(ip->i_mount))
1174 		return 0;
1175 
1176 	/*
1177 	 * If the target inode is in extent form and the temp inode is in btree
1178 	 * form then we will end up with the target inode in the wrong format,
1179 	 * as we already know there are fewer extents in the temp inode.
1180 	 */
1181 	if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
1182 	    tifp->if_format == XFS_DINODE_FMT_BTREE)
1183 		return -EINVAL;
1184 
1185 	/* Check temp in extent form to max in target */
1186 	if (tifp->if_format == XFS_DINODE_FMT_EXTENTS &&
1187 	    tifp->if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1188 		return -EINVAL;
1189 
1190 	/* Check target in extent form to max in temp */
1191 	if (ifp->if_format == XFS_DINODE_FMT_EXTENTS &&
1192 	    ifp->if_nextents > XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1193 		return -EINVAL;
1194 
1195 	/*
1196 	 * If we are in a btree format, check that the temp root block will fit
1197 	 * in the target and that it has enough extents to be in btree format
1198 	 * in the target.
1199 	 *
1200 	 * Note that we have to be careful to allow btree->extent conversions
1201 	 * (a common defrag case) which will occur when the temp inode is in
1202 	 * extent format...
1203 	 */
1204 	if (tifp->if_format == XFS_DINODE_FMT_BTREE) {
1205 		if (xfs_inode_has_attr_fork(ip) &&
1206 		    xfs_bmap_bmdr_space(tifp->if_broot) > xfs_inode_fork_boff(ip))
1207 			return -EINVAL;
1208 		if (tifp->if_nextents <= XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1209 			return -EINVAL;
1210 	}
1211 
1212 	/* Reciprocal target->temp btree format checks */
1213 	if (ifp->if_format == XFS_DINODE_FMT_BTREE) {
1214 		if (xfs_inode_has_attr_fork(tip) &&
1215 		    xfs_bmap_bmdr_space(ip->i_df.if_broot) > xfs_inode_fork_boff(tip))
1216 			return -EINVAL;
1217 		if (ifp->if_nextents <= XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1218 			return -EINVAL;
1219 	}
1220 
1221 	return 0;
1222 }
1223 
1224 static int
1225 xfs_swap_extent_flush(
1226 	struct xfs_inode	*ip)
1227 {
1228 	int	error;
1229 
1230 	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
1231 	if (error)
1232 		return error;
1233 	truncate_pagecache_range(VFS_I(ip), 0, -1);
1234 
1235 	/* Verify O_DIRECT for ftmp */
1236 	if (VFS_I(ip)->i_mapping->nrpages)
1237 		return -EINVAL;
1238 	return 0;
1239 }
1240 
1241 /*
1242  * Move extents from one file to another, when rmap is enabled.
1243  */
1244 STATIC int
1245 xfs_swap_extent_rmap(
1246 	struct xfs_trans		**tpp,
1247 	struct xfs_inode		*ip,
1248 	struct xfs_inode		*tip)
1249 {
1250 	struct xfs_trans		*tp = *tpp;
1251 	struct xfs_bmbt_irec		irec;
1252 	struct xfs_bmbt_irec		uirec;
1253 	struct xfs_bmbt_irec		tirec;
1254 	xfs_fileoff_t			offset_fsb;
1255 	xfs_fileoff_t			end_fsb;
1256 	xfs_filblks_t			count_fsb;
1257 	int				error;
1258 	xfs_filblks_t			ilen;
1259 	xfs_filblks_t			rlen;
1260 	int				nimaps;
1261 	uint64_t			tip_flags2;
1262 
1263 	/*
1264 	 * If the source file has shared blocks, we must flag the donor
1265 	 * file as having shared blocks so that we get the shared-block
1266 	 * rmap functions when we go to fix up the rmaps.  The flags
1267 	 * will be switched for real later.
1268 	 */
1269 	tip_flags2 = tip->i_diflags2;
1270 	if (ip->i_diflags2 & XFS_DIFLAG2_REFLINK)
1271 		tip->i_diflags2 |= XFS_DIFLAG2_REFLINK;
1272 
1273 	offset_fsb = 0;
1274 	end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
1275 	count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
1276 
1277 	while (count_fsb) {
1278 		/* Read extent from the donor file */
1279 		nimaps = 1;
1280 		error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
1281 				&nimaps, 0);
1282 		if (error)
1283 			goto out;
1284 		ASSERT(nimaps == 1);
1285 		ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);
1286 
1287 		trace_xfs_swap_extent_rmap_remap(tip, &tirec);
1288 		ilen = tirec.br_blockcount;
1289 
1290 		/* Unmap the old blocks in the source file. */
1291 		while (tirec.br_blockcount) {
1292 			ASSERT(tp->t_highest_agno == NULLAGNUMBER);
1293 			trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);
1294 
1295 			/* Read extent from the source file */
1296 			nimaps = 1;
1297 			error = xfs_bmapi_read(ip, tirec.br_startoff,
1298 					tirec.br_blockcount, &irec,
1299 					&nimaps, 0);
1300 			if (error)
1301 				goto out;
1302 			ASSERT(nimaps == 1);
1303 			ASSERT(tirec.br_startoff == irec.br_startoff);
1304 			trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
1305 
1306 			/* Trim the extent. */
1307 			uirec = tirec;
1308 			uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
1309 					tirec.br_blockcount,
1310 					irec.br_blockcount);
1311 			trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);
1312 
1313 			if (xfs_bmap_is_real_extent(&uirec)) {
1314 				error = xfs_iext_count_extend(tp, ip,
1315 						XFS_DATA_FORK,
1316 						XFS_IEXT_SWAP_RMAP_CNT);
1317 				if (error)
1318 					goto out;
1319 			}
1320 
1321 			if (xfs_bmap_is_real_extent(&irec)) {
1322 				error = xfs_iext_count_extend(tp, tip,
1323 						XFS_DATA_FORK,
1324 						XFS_IEXT_SWAP_RMAP_CNT);
1325 				if (error)
1326 					goto out;
1327 			}
1328 
1329 			/* Remove the mapping from the donor file. */
1330 			xfs_bmap_unmap_extent(tp, tip, XFS_DATA_FORK, &uirec);
1331 
1332 			/* Remove the mapping from the source file. */
1333 			xfs_bmap_unmap_extent(tp, ip, XFS_DATA_FORK, &irec);
1334 
1335 			/* Map the donor file's blocks into the source file. */
1336 			xfs_bmap_map_extent(tp, ip, XFS_DATA_FORK, &uirec);
1337 
1338 			/* Map the source file's blocks into the donor file. */
1339 			xfs_bmap_map_extent(tp, tip, XFS_DATA_FORK, &irec);
1340 
1341 			error = xfs_defer_finish(tpp);
1342 			tp = *tpp;
1343 			if (error)
1344 				goto out;
1345 
1346 			tirec.br_startoff += rlen;
1347 			if (tirec.br_startblock != HOLESTARTBLOCK &&
1348 			    tirec.br_startblock != DELAYSTARTBLOCK)
1349 				tirec.br_startblock += rlen;
1350 			tirec.br_blockcount -= rlen;
1351 		}
1352 
1353 		/* Roll on... */
1354 		count_fsb -= ilen;
1355 		offset_fsb += ilen;
1356 	}
1357 
1358 	tip->i_diflags2 = tip_flags2;
1359 	return 0;
1360 
1361 out:
1362 	trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
1363 	tip->i_diflags2 = tip_flags2;
1364 	return error;
1365 }
1366 
1367 /* Swap the extents of two files by swapping data forks. */
1368 STATIC int
1369 xfs_swap_extent_forks(
1370 	struct xfs_trans	*tp,
1371 	struct xfs_inode	*ip,
1372 	struct xfs_inode	*tip,
1373 	int			*src_log_flags,
1374 	int			*target_log_flags)
1375 {
1376 	xfs_filblks_t		aforkblks = 0;
1377 	xfs_filblks_t		taforkblks = 0;
1378 	xfs_extnum_t		junk;
1379 	uint64_t		tmp;
1380 	int			error;
1381 
1382 	/*
1383 	 * Count the number of extended attribute blocks
1384 	 */
1385 	if (xfs_inode_has_attr_fork(ip) && ip->i_af.if_nextents > 0 &&
1386 	    ip->i_af.if_format != XFS_DINODE_FMT_LOCAL) {
1387 		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK, &junk,
1388 				&aforkblks);
1389 		if (error)
1390 			return error;
1391 	}
1392 	if (xfs_inode_has_attr_fork(tip) && tip->i_af.if_nextents > 0 &&
1393 	    tip->i_af.if_format != XFS_DINODE_FMT_LOCAL) {
1394 		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK, &junk,
1395 				&taforkblks);
1396 		if (error)
1397 			return error;
1398 	}
1399 
1400 	/*
1401 	 * Btree format (v3) inodes have the inode number stamped in the bmbt
1402 	 * block headers. We can't start changing the bmbt blocks until the
1403 	 * inode owner change is logged so recovery does the right thing in the
1404 	 * event of a crash. Set the owner change log flags now and leave the
1405 	 * bmbt scan as the last step.
1406 	 */
1407 	if (xfs_has_v3inodes(ip->i_mount)) {
1408 		if (ip->i_df.if_format == XFS_DINODE_FMT_BTREE)
1409 			(*target_log_flags) |= XFS_ILOG_DOWNER;
1410 		if (tip->i_df.if_format == XFS_DINODE_FMT_BTREE)
1411 			(*src_log_flags) |= XFS_ILOG_DOWNER;
1412 	}
1413 
1414 	/*
1415 	 * Swap the data forks of the inodes
1416 	 */
1417 	swap(ip->i_df, tip->i_df);
1418 
1419 	/*
1420 	 * Fix the on-disk inode values
1421 	 */
1422 	tmp = (uint64_t)ip->i_nblocks;
1423 	ip->i_nblocks = tip->i_nblocks - taforkblks + aforkblks;
1424 	tip->i_nblocks = tmp + taforkblks - aforkblks;
1425 
1426 	/*
1427 	 * The extents in the source inode could still contain speculative
1428 	 * preallocation beyond EOF (e.g. the file is open but not modified
1429 	 * while defrag is in progress). In that case, we need to copy over the
1430 	 * number of delalloc blocks the data fork in the source inode is
1431 	 * tracking beyond EOF so that when the fork is truncated away when the
1432 	 * temporary inode is unlinked we don't underrun the i_delayed_blks
1433 	 * counter on that inode.
1434 	 */
1435 	ASSERT(tip->i_delayed_blks == 0);
1436 	tip->i_delayed_blks = ip->i_delayed_blks;
1437 	ip->i_delayed_blks = 0;
1438 
1439 	switch (ip->i_df.if_format) {
1440 	case XFS_DINODE_FMT_EXTENTS:
1441 		(*src_log_flags) |= XFS_ILOG_DEXT;
1442 		break;
1443 	case XFS_DINODE_FMT_BTREE:
1444 		ASSERT(!xfs_has_v3inodes(ip->i_mount) ||
1445 		       (*src_log_flags & XFS_ILOG_DOWNER));
1446 		(*src_log_flags) |= XFS_ILOG_DBROOT;
1447 		break;
1448 	}
1449 
1450 	switch (tip->i_df.if_format) {
1451 	case XFS_DINODE_FMT_EXTENTS:
1452 		(*target_log_flags) |= XFS_ILOG_DEXT;
1453 		break;
1454 	case XFS_DINODE_FMT_BTREE:
1455 		(*target_log_flags) |= XFS_ILOG_DBROOT;
1456 		ASSERT(!xfs_has_v3inodes(ip->i_mount) ||
1457 		       (*target_log_flags & XFS_ILOG_DOWNER));
1458 		break;
1459 	}
1460 
1461 	return 0;
1462 }
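
The i_nblocks fix-up above has to correct for attribute fork blocks, which stay with their original inode while the data forks move. A worked model with made-up numbers:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Before the swap: total blocks and each inode's attr-fork share. */
	uint64_t	ip_nblocks = 100, aforkblks = 10;	/* target */
	uint64_t	tip_nblocks = 60, taforkblks = 4;	/* temp */
	uint64_t	tmp = ip_nblocks;

	/* Mirrors the fix-up in xfs_swap_extent_forks(). */
	ip_nblocks = tip_nblocks - taforkblks + aforkblks;
	tip_nblocks = tmp + taforkblks - aforkblks;

	/* Each inode keeps its attr blocks and gains the other's data. */
	assert(ip_nblocks == 56 + 10);	/* temp's 56 data + own 10 attr */
	assert(tip_nblocks == 90 + 4);	/* target's 90 data + own 4 attr */
	return 0;
}
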
1463 
1464 /*
1465  * Fix up the owners of the bmbt blocks to refer to the current inode. The
1466  * change owner scan attempts to order all modified buffers in the current
1467  * transaction. In the event of ordered buffer failure, the offending buffer is
1468  * physically logged as a fallback and the scan returns -EAGAIN. We must roll
1469  * the transaction in this case to replenish the fallback log reservation and
1470  * restart the scan. This process repeats until the scan completes.
1471  */
1472 static int
1473 xfs_swap_change_owner(
1474 	struct xfs_trans	**tpp,
1475 	struct xfs_inode	*ip,
1476 	struct xfs_inode	*tmpip)
1477 {
1478 	int			error;
1479 	struct xfs_trans	*tp = *tpp;
1480 
1481 	do {
1482 		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK, ip->i_ino,
1483 					      NULL);
1484 		/* success or fatal error */
1485 		if (error != -EAGAIN)
1486 			break;
1487 
1488 		error = xfs_trans_roll(tpp);
1489 		if (error)
1490 			break;
1491 		tp = *tpp;
1492 
1493 		/*
1494 		 * Redirty both inodes so they can relog and keep the log tail
1495 		 * moving forward.
1496 		 */
1497 		xfs_trans_ijoin(tp, ip, 0);
1498 		xfs_trans_ijoin(tp, tmpip, 0);
1499 		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1500 		xfs_trans_log_inode(tp, tmpip, XFS_ILOG_CORE);
1501 	} while (true);
1502 
1503 	return error;
1504 }
1505 
1506 int
1507 xfs_swap_extents(
1508 	struct xfs_inode	*ip,	/* target inode */
1509 	struct xfs_inode	*tip,	/* tmp inode */
1510 	struct xfs_swapext	*sxp)
1511 {
1512 	struct xfs_mount	*mp = ip->i_mount;
1513 	struct xfs_trans	*tp;
1514 	struct xfs_bstat	*sbp = &sxp->sx_stat;
1515 	int			src_log_flags, target_log_flags;
1516 	int			error = 0;
1517 	uint64_t		f;
1518 	int			resblks = 0;
1519 	unsigned int		flags = 0;
1520 	struct timespec64	ctime, mtime;
1521 
1522 	/*
1523 	 * Lock the inodes against other IO, page faults and truncate to
1524 	 * begin with.  Then we can ensure the inodes are flushed and have no
1525 	 * page cache safely. Once we have done this we can take the ilocks and
1526 	 * do the rest of the checks.
1527 	 */
1528 	lock_two_nondirectories(VFS_I(ip), VFS_I(tip));
1529 	filemap_invalidate_lock_two(VFS_I(ip)->i_mapping,
1530 				    VFS_I(tip)->i_mapping);
1531 
1532 	/* Verify that both files have the same format */
1533 	if ((VFS_I(ip)->i_mode & S_IFMT) != (VFS_I(tip)->i_mode & S_IFMT)) {
1534 		error = -EINVAL;
1535 		goto out_unlock;
1536 	}
1537 
1538 	/* Verify both files are either real-time or non-realtime */
1539 	if (XFS_IS_REALTIME_INODE(ip) != XFS_IS_REALTIME_INODE(tip)) {
1540 		error = -EINVAL;
1541 		goto out_unlock;
1542 	}
1543 
1544 	/*
1545 	 * The rmapbt implementation is unable to resume a swapext operation
1546 	 * after a crash if the allocation unit size is larger than a block.
1547 	 * This (deprecated) interface will not be upgraded to handle this
1548 	 * situation.  Defragmentation must be performed with the commit range
1549 	 * ioctl.
1550 	 */
1551 	if (XFS_IS_REALTIME_INODE(ip) && xfs_has_rtgroups(ip->i_mount)) {
1552 		error = -EOPNOTSUPP;
1553 		goto out_unlock;
1554 	}
1555 
1556 	error = xfs_qm_dqattach(ip);
1557 	if (error)
1558 		goto out_unlock;
1559 
1560 	error = xfs_qm_dqattach(tip);
1561 	if (error)
1562 		goto out_unlock;
1563 
1564 	error = xfs_swap_extent_flush(ip);
1565 	if (error)
1566 		goto out_unlock;
1567 	error = xfs_swap_extent_flush(tip);
1568 	if (error)
1569 		goto out_unlock;
1570 
1571 	if (xfs_inode_has_cow_data(tip)) {
1572 		error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true);
1573 		if (error)
1574 			goto out_unlock;
1575 	}
1576 
1577 	/*
1578 	 * Extent "swapping" with rmap requires a permanent reservation and
1579 	 * a block reservation because it's really just a remap operation
1580 	 * performed with log redo items!
1581 	 */
1582 	if (xfs_has_rmapbt(mp)) {
1583 		int		w = XFS_DATA_FORK;
1584 		uint32_t	ipnext = ip->i_df.if_nextents;
1585 		uint32_t	tipnext	= tip->i_df.if_nextents;
1586 
1587 		/*
1588 		 * Conceptually this shouldn't affect the shape of either bmbt,
1589 		 * but since we atomically move extents one by one, we reserve
1590 		 * enough space to rebuild both trees.
1591 		 */
1592 		resblks = XFS_SWAP_RMAP_SPACE_RES(mp, ipnext, w);
1593 		resblks += XFS_SWAP_RMAP_SPACE_RES(mp, tipnext, w);
1594 
1595 		/*
1596 		 * If either inode straddles a bmapbt block allocation boundary,
1597 		 * the rmapbt algorithm triggers repeated allocs and frees as
1598 		 * extents are remapped. This can exhaust the block reservation
1599 		 * prematurely and cause shutdown. Return freed blocks to the
1600 		 * transaction reservation to counter this behavior.
1601 		 */
1602 		flags |= XFS_TRANS_RES_FDBLKS;
1603 	}
1604 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, flags,
1605 				&tp);
1606 	if (error)
1607 		goto out_unlock;
1608 
1609 	/*
1610 	 * Lock and join the inodes to the transaction so that transaction commit
1611 	 * or cancel will unlock the inodes from this point onwards.
1612 	 */
1613 	xfs_lock_two_inodes(ip, XFS_ILOCK_EXCL, tip, XFS_ILOCK_EXCL);
1614 	xfs_trans_ijoin(tp, ip, 0);
1615 	xfs_trans_ijoin(tp, tip, 0);
1616 
1617 
1618 	/* Verify all data are being swapped */
1619 	if (sxp->sx_offset != 0 ||
1620 	    sxp->sx_length != ip->i_disk_size ||
1621 	    sxp->sx_length != tip->i_disk_size) {
1622 		error = -EFAULT;
1623 		goto out_trans_cancel;
1624 	}
1625 
1626 	trace_xfs_swap_extent_before(ip, 0);
1627 	trace_xfs_swap_extent_before(tip, 1);
1628 
1629 	/* check inode formats now that data is flushed */
1630 	error = xfs_swap_extents_check_format(ip, tip);
1631 	if (error) {
1632 		xfs_notice(mp,
1633 		    "%s: inode 0x%llx format is incompatible for exchanging.",
1634 				__func__, ip->i_ino);
1635 		goto out_trans_cancel;
1636 	}
1637 
1638 	/*
1639 	 * Compare the current change & modify times with those
1640 	 * passed in.  If they differ, we abort this swap.
1641 	 * This is the mechanism used to assure the calling
1642 	 * process that the file was not changed out from
1643 	 * under it.
1644 	 */
1645 	ctime = inode_get_ctime(VFS_I(ip));
1646 	mtime = inode_get_mtime(VFS_I(ip));
1647 	if ((sbp->bs_ctime.tv_sec != ctime.tv_sec) ||
1648 	    (sbp->bs_ctime.tv_nsec != ctime.tv_nsec) ||
1649 	    (sbp->bs_mtime.tv_sec != mtime.tv_sec) ||
1650 	    (sbp->bs_mtime.tv_nsec != mtime.tv_nsec)) {
1651 		error = -EBUSY;
1652 		goto out_trans_cancel;
1653 	}
1654 
1655 	/*
1656 	 * Note the trickiness in setting the log flags - we set the owner log
1657 	 * flag on the opposite inode (i.e. the inode we are setting the new
1658 	 * owner to be) because once we swap the forks and log that, log
1659 	 * recovery is going to see the fork as owned by the swapped inode,
1660 	 * not the pre-swapped inodes.
1661 	 */
1662 	src_log_flags = XFS_ILOG_CORE;
1663 	target_log_flags = XFS_ILOG_CORE;
1664 
1665 	if (xfs_has_rmapbt(mp))
1666 		error = xfs_swap_extent_rmap(&tp, ip, tip);
1667 	else
1668 		error = xfs_swap_extent_forks(tp, ip, tip, &src_log_flags,
1669 				&target_log_flags);
1670 	if (error)
1671 		goto out_trans_cancel;
1672 
1673 	/* Do we have to swap reflink flags? */
1674 	if ((ip->i_diflags2 & XFS_DIFLAG2_REFLINK) ^
1675 	    (tip->i_diflags2 & XFS_DIFLAG2_REFLINK)) {
1676 		f = ip->i_diflags2 & XFS_DIFLAG2_REFLINK;
1677 		ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
1678 		ip->i_diflags2 |= tip->i_diflags2 & XFS_DIFLAG2_REFLINK;
1679 		tip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
1680 		tip->i_diflags2 |= f & XFS_DIFLAG2_REFLINK;
1681 	}
1682 
1683 	/* Swap the cow forks. */
1684 	if (xfs_has_reflink(mp)) {
1685 		ASSERT(!ip->i_cowfp ||
1686 		       ip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
1687 		ASSERT(!tip->i_cowfp ||
1688 		       tip->i_cowfp->if_format == XFS_DINODE_FMT_EXTENTS);
1689 
1690 		swap(ip->i_cowfp, tip->i_cowfp);
1691 
1692 		if (ip->i_cowfp && ip->i_cowfp->if_bytes)
1693 			xfs_inode_set_cowblocks_tag(ip);
1694 		else
1695 			xfs_inode_clear_cowblocks_tag(ip);
1696 		if (tip->i_cowfp && tip->i_cowfp->if_bytes)
1697 			xfs_inode_set_cowblocks_tag(tip);
1698 		else
1699 			xfs_inode_clear_cowblocks_tag(tip);
1700 	}
1701 
1702 	xfs_trans_log_inode(tp, ip,  src_log_flags);
1703 	xfs_trans_log_inode(tp, tip, target_log_flags);
1704 
1705 	/*
1706 	 * The extent forks have been swapped, but crc=1,rmapbt=0 filesystems
1707 	 * have inode number owner values in the bmbt blocks that still refer to
1708 	 * the old inode. Scan each bmbt to fix up the owner values with the
1709 	 * inode number of the current inode.
1710 	 */
1711 	if (src_log_flags & XFS_ILOG_DOWNER) {
1712 		error = xfs_swap_change_owner(&tp, ip, tip);
1713 		if (error)
1714 			goto out_trans_cancel;
1715 	}
1716 	if (target_log_flags & XFS_ILOG_DOWNER) {
1717 		error = xfs_swap_change_owner(&tp, tip, ip);
1718 		if (error)
1719 			goto out_trans_cancel;
1720 	}
1721 
1722 	/*
1723 	 * If this is a synchronous mount, make sure that the
1724 	 * transaction goes to disk before returning to the user.
1725 	 */
1726 	if (xfs_has_wsync(mp))
1727 		xfs_trans_set_sync(tp);
1728 
1729 	error = xfs_trans_commit(tp);
1730 
1731 	trace_xfs_swap_extent_after(ip, 0);
1732 	trace_xfs_swap_extent_after(tip, 1);
1733 
1734 out_unlock_ilock:
1735 	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1736 	xfs_iunlock(tip, XFS_ILOCK_EXCL);
1737 out_unlock:
1738 	filemap_invalidate_unlock_two(VFS_I(ip)->i_mapping,
1739 				      VFS_I(tip)->i_mapping);
1740 	unlock_two_nondirectories(VFS_I(ip), VFS_I(tip));
1741 	return error;
1742 
1743 out_trans_cancel:
1744 	xfs_trans_cancel(tp);
1745 	goto out_unlock_ilock;
1746 }
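
From userspace this path is reached through the (deprecated) XFS_IOC_SWAPEXT ioctl, which is what xfs_fsr uses for defragmentation. A hedged sketch of the calling convention; the bulkstat step that fills sx_stat beforehand is elided, and it is exactly those bs_ctime/bs_mtime fields that the -EBUSY check above compares against the target.

/* Userspace sketch (not part of this file). */
#include <sys/ioctl.h>
#include <xfs/xfs.h>

static int swap_extents(int target_fd, int tmp_fd, off_t size,
		const struct xfs_bstat *target_stat_before_copy)
{
	struct xfs_swapext	sx = {
		.sx_version	= XFS_SX_VERSION,
		.sx_fdtarget	= target_fd,
		.sx_fdtmp	= tmp_fd,
		.sx_offset	= 0,	/* the whole file must be swapped */
		.sx_length	= size,	/* == on-disk size of both files */
		.sx_stat	= *target_stat_before_copy,
	};

	/* Fails with EBUSY if the target changed since it was statted. */
	return ioctl(target_fd, XFS_IOC_SWAPEXT, &sx);
}
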
1747