// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_refcount.h"
#include "xfs_bmap_btree.h"
#include "xfs_trans_space.h"
#include "xfs_bit.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_reflink.h"
#include "xfs_iomap.h"
#include "xfs_ag.h"
#include "xfs_ag_resv.h"
#include "xfs_health.h"

/*
 * Copy on Write of Shared Blocks
 *
 * XFS must preserve "the usual" file semantics even when two files share
 * the same physical blocks.  This means that a write to one file must not
 * alter the blocks in a different file; the way that we'll do that is
 * through the use of a copy-on-write mechanism.  At a high level, that
 * means that when we want to write to a shared block, we allocate a new
 * block, write the data to the new block, and if that succeeds we map the
 * new block into the file.
 *
 * XFS provides a "delayed allocation" mechanism that defers the allocation
 * of disk blocks to dirty-but-not-yet-mapped file blocks as long as
 * possible.  This reduces fragmentation by enabling the filesystem to ask
 * for bigger chunks less often, which is exactly what we want for CoW.
 *
 * The delalloc mechanism begins when the kernel wants to make a block
 * writable (write_begin or page_mkwrite).  If the offset is not mapped, we
 * create a delalloc mapping, which is a regular in-core extent, but without
 * a real startblock.  (For delalloc mappings, the startblock encodes both
 * a flag that this is a delalloc mapping, and a worst-case estimate of how
 * many blocks might be required to put the mapping into the BMBT.)  Delalloc
 * mappings are a reservation against the free space in the filesystem;
 * adjacent mappings can also be combined into fewer larger mappings.
 *
 * As an optimization, the CoW extent size hint (cowextsz) creates
 * outsized aligned delalloc reservations in the hope of landing nearby
 * out-of-order CoW writes in a single extent on disk, thereby reducing
 * fragmentation and improving future performance.
 *
 * D: --RRRRRRSSSRRRRRRRR--- (data fork)
 * C: ------DDDDDDD--------- (CoW fork)
 *
 * When dirty pages are being written out (typically in writepage), the
 * delalloc reservations are converted into unwritten mappings by
 * allocating blocks and replacing the delalloc mapping with real ones.
 * A delalloc mapping can be replaced by several unwritten ones if the
 * free space is fragmented.
 *
 * D: --RRRRRRSSSRRRRRRRR---
 * C: ------UUUUUUU---------
 *
 * We want to adapt the delalloc mechanism for copy-on-write, since the
 * write paths are similar.  The first two steps (creating the reservation
 * and allocating the blocks) are exactly the same as delalloc except that
 * the mappings must be stored in a separate CoW fork because we do not want
 * to disturb the mapping in the data fork until we're sure that the write
 * succeeded.  IO completion in this case is the process of removing the old
 * mapping from the data fork and moving the new mapping from the CoW fork to
 * the data fork.  This will be discussed shortly.
 *
 * For now, unaligned directio writes will be bounced back to the page cache.
 * Block-aligned directio writes will use the same mechanism as buffered
 * writes.
 *
 * Just prior to submitting the actual disk write requests, we convert
 * the extents representing the range of the file actually being written
 * (as opposed to extra pieces created for the cowextsize hint) to real
 * extents.  This will become important in the next step:
 *
 * D: --RRRRRRSSSRRRRRRRR---
 * C: ------UUrrUUU---------
 *
 * CoW remapping must be done after the data block write completes,
 * because we don't want to destroy the old data fork map until we're sure
 * the new block has been written.  Since the new mappings are kept in a
 * separate fork, we can simply iterate these mappings to find the ones
 * that cover the file blocks that we just CoW'd.  For each extent, simply
 * unmap the corresponding range in the data fork, map the new range into
 * the data fork, and remove the extent from the CoW fork.  Because of
 * the presence of the cowextsize hint, however, we must be careful
 * only to remap the blocks that we've actually written out -- we must
 * never remap delalloc reservations nor CoW staging blocks that have
 * yet to be written.  This corresponds exactly to the real extents in
 * the CoW fork:
 *
 * D: --RRRRRRrrSRRRRRRRR---
 * C: ------UU--UUU---------
 *
 * Since the remapping operation can be applied to an arbitrary file
 * range, we record the need for the remap step as a flag in the ioend
 * instead of declaring a new IO type.  This is required for direct io
 * because we only have one ioend for the whole dio, and we have to be able
 * to remember the presence of unwritten blocks and CoW blocks with a single
 * ioend structure.  Better yet, the more ground we can cover with one
 * ioend, the better.
 */
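
/*
 * A minimal sketch, for orientation only, of how the helpers in this file
 * tie into that lifecycle (error handling, locking, and flag plumbing
 * elided; the xfs_reflink_* calls below are real functions defined in this
 * file, the surrounding flow is illustrative):
 *
 *	// write time: set up CoW fork blocks for a shared range
 *	error = xfs_reflink_allocate_cow(ip, &imap, &cmap, &shared,
 *			&lockmode, is_direct_io);
 *
 *	// just before a direct write reaches the disk: unwritten -> real
 *	error = xfs_reflink_convert_cow(ip, offset, count);
 *
 *	// I/O completion: move the staging blocks into the data fork
 *	error = xfs_reflink_end_cow(ip, offset, count);
 *
 *	// I/O failure: tear down the reservations instead; cancel_real
 *	// selects whether written staging extents are freed as well
 *	error = xfs_reflink_cancel_cow_range(ip, offset, count, cancel_real);
 */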

/*
 * Given an AG extent, find the lowest-numbered run of shared blocks
 * within that range and return the range in fbno/flen.  If
 * find_end_of_shared is true, return the longest contiguous extent of
 * shared blocks.  If there are no shared extents, fbno and flen will
 * be set to NULLAGBLOCK and 0, respectively.
 */
static int
xfs_reflink_find_shared(
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	xfs_agblock_t		agbno,
	xfs_extlen_t		aglen,
	xfs_agblock_t		*fbno,
	xfs_extlen_t		*flen,
	bool			find_end_of_shared)
{
	struct xfs_buf		*agbp;
	struct xfs_btree_cur	*cur;
	int			error;

	error = xfs_alloc_read_agf(pag, tp, 0, &agbp);
	if (error)
		return error;

	cur = xfs_refcountbt_init_cursor(pag->pag_mount, tp, agbp, pag);

	error = xfs_refcount_find_shared(cur, agbno, aglen, fbno, flen,
			find_end_of_shared);

	xfs_btree_del_cursor(cur, error);

	xfs_trans_brelse(tp, agbp);
	return error;
}

/*
 * Trim the mapping to the next block where there's a change in the
 * shared/unshared status.  More specifically, this means that we
 * find the lowest-numbered extent of shared blocks that coincides with
 * the given block mapping.  If the shared extent overlaps the start of
 * the mapping, trim the mapping to the end of the shared extent.  If
 * the shared region intersects the mapping, trim the mapping to the
 * start of the shared extent.  If there are no shared regions that
 * overlap, just return the original extent.
 */
int
xfs_reflink_trim_around_shared(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*irec,
	bool			*shared)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;
	xfs_agblock_t		agbno;
	xfs_extlen_t		aglen;
	xfs_agblock_t		fbno;
	xfs_extlen_t		flen;
	int			error = 0;

	/* Holes, unwritten, and delalloc extents cannot be shared */
	if (!xfs_is_cow_inode(ip) || !xfs_bmap_is_written_extent(irec)) {
		*shared = false;
		return 0;
	}

	trace_xfs_reflink_trim_around_shared(ip, irec);

	pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, irec->br_startblock));
	agbno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock);
	aglen = irec->br_blockcount;

	error = xfs_reflink_find_shared(pag, NULL, agbno, aglen, &fbno, &flen,
			true);
	xfs_perag_put(pag);
	if (error)
		return error;

	*shared = false;
	if (fbno == NULLAGBLOCK) {
		/* No shared blocks at all. */
		return 0;
	}

	if (fbno == agbno) {
		/*
		 * The start of this extent is shared.  Truncate the
		 * mapping at the end of the shared region so that a
		 * subsequent iteration starts at the start of the
		 * unshared region.
		 */
		irec->br_blockcount = flen;
		*shared = true;
		return 0;
	}

	/*
	 * There's a shared extent midway through this extent.
	 * Truncate the mapping at the start of the shared
	 * extent so that a subsequent iteration starts at the
	 * start of the shared region.
	 */
	irec->br_blockcount = fbno - agbno;
	return 0;
}
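
/*
 * A worked example of the trimming above, with hypothetical numbers:
 * suppose irec maps agbno 100 for 50 blocks and the refcount btree
 * reports the lowest shared run as [110, 120).  fbno (110) is neither
 * NULLAGBLOCK nor agbno (100), so the mapping is trimmed to the ten
 * unshared blocks [100, 110) and *shared stays false.  A subsequent
 * iteration starting at agbno 110 then sees fbno == agbno, trims
 * br_blockcount to flen (10), and returns with *shared == true.
 */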

int
xfs_bmap_trim_cow(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*imap,
	bool			*shared)
{
	/* We can't update any real extents in always COW mode. */
	if (xfs_is_always_cow_inode(ip) &&
	    !isnullstartblock(imap->br_startblock)) {
		*shared = true;
		return 0;
	}

	/* Trim the mapping to the nearest shared extent boundary. */
	return xfs_reflink_trim_around_shared(ip, imap, shared);
}

static int
xfs_reflink_convert_cow_locked(
	struct xfs_inode	*ip,
	xfs_fileoff_t		offset_fsb,
	xfs_filblks_t		count_fsb)
{
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	got;
	struct xfs_btree_cur	*dummy_cur = NULL;
	int			dummy_logflags;
	int			error = 0;

	if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got))
		return 0;

	do {
		if (got.br_startoff >= offset_fsb + count_fsb)
			break;
		if (got.br_state == XFS_EXT_NORM)
			continue;
		if (WARN_ON_ONCE(isnullstartblock(got.br_startblock)))
			return -EIO;

		xfs_trim_extent(&got, offset_fsb, count_fsb);
		if (!got.br_blockcount)
			continue;

		got.br_state = XFS_EXT_NORM;
		error = xfs_bmap_add_extent_unwritten_real(NULL, ip,
				XFS_COW_FORK, &icur, &dummy_cur, &got,
				&dummy_logflags);
		if (error)
			return error;
	} while (xfs_iext_next_extent(ip->i_cowfp, &icur, &got));

	return error;
}

/* Convert all of the unwritten CoW extents in a file's range to real ones. */
int
xfs_reflink_convert_cow(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		count)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
	xfs_filblks_t		count_fsb = end_fsb - offset_fsb;
	int			error;

	ASSERT(count != 0);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_reflink_convert_cow_locked(ip, offset_fsb, count_fsb);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
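
/*
 * Note the asymmetric rounding above: with a hypothetical 4096-byte block
 * size, offset = 6144 and count = 4096 yield offset_fsb = 1 (XFS_B_TO_FSBT
 * rounds down) and end_fsb = 3 (XFS_B_TO_FSB rounds up), so the conversion
 * covers every block that the byte range touches, even partially.
 */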

/*
 * Find the extent that maps the given range in the COW fork. Even if the
 * extent is not shared we might have a preallocation for it in the COW fork.
 * If so we use it rather than triggering a new allocation.
 */
static int
xfs_find_trim_cow_extent(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*imap,
	struct xfs_bmbt_irec	*cmap,
	bool			*shared,
	bool			*found)
{
	xfs_fileoff_t		offset_fsb = imap->br_startoff;
	xfs_filblks_t		count_fsb = imap->br_blockcount;
	struct xfs_iext_cursor	icur;

	*found = false;

	/*
	 * If we don't find an overlapping extent, trim the range we need to
	 * allocate to fit the hole we found.
	 */
	if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, cmap))
		cmap->br_startoff = offset_fsb + count_fsb;
	if (cmap->br_startoff > offset_fsb) {
		xfs_trim_extent(imap, imap->br_startoff,
				cmap->br_startoff - imap->br_startoff);
		return xfs_bmap_trim_cow(ip, imap, shared);
	}

	*shared = true;
	if (isnullstartblock(cmap->br_startblock)) {
		xfs_trim_extent(imap, cmap->br_startoff, cmap->br_blockcount);
		return 0;
	}

	/* real extent found - no need to allocate */
	xfs_trim_extent(cmap, offset_fsb, count_fsb);
	*found = true;
	return 0;
}

static int
xfs_reflink_convert_unwritten(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*imap,
	struct xfs_bmbt_irec	*cmap,
	bool			convert_now)
{
	xfs_fileoff_t		offset_fsb = imap->br_startoff;
	xfs_filblks_t		count_fsb = imap->br_blockcount;
	int			error;

	/*
	 * cmap might be larger than imap due to the cowextsize hint.
	 */
	xfs_trim_extent(cmap, offset_fsb, count_fsb);

	/*
	 * COW fork extents are supposed to remain unwritten until we're ready
	 * to initiate a disk write.  For direct I/O we are going to write the
	 * data and need the conversion, but for buffered writes we're done.
	 */
	if (!convert_now || cmap->br_state == XFS_EXT_NORM)
		return 0;

	trace_xfs_reflink_convert_cow(ip, cmap);

	error = xfs_reflink_convert_cow_locked(ip, offset_fsb, count_fsb);
	if (!error)
		cmap->br_state = XFS_EXT_NORM;

	return error;
}

static int
xfs_reflink_fill_cow_hole(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*imap,
	struct xfs_bmbt_irec	*cmap,
	bool			*shared,
	uint			*lockmode,
	bool			convert_now)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	xfs_filblks_t		resaligned;
	xfs_extlen_t		resblks;
	int			nimaps;
	int			error;
	bool			found;

	resaligned = xfs_aligned_fsb_count(imap->br_startoff,
		imap->br_blockcount, xfs_get_cowextsz_hint(ip));
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);

	xfs_iunlock(ip, *lockmode);
	*lockmode = 0;

	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks, 0,
			false, &tp);
	if (error)
		return error;

	*lockmode = XFS_ILOCK_EXCL;

	error = xfs_find_trim_cow_extent(ip, imap, cmap, shared, &found);
	if (error || !*shared)
		goto out_trans_cancel;

	if (found) {
		xfs_trans_cancel(tp);
		goto convert;
	}

	/* Allocate the entire reservation as unwritten blocks. */
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, imap->br_startoff, imap->br_blockcount,
			XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC, 0, cmap,
			&nimaps);
	if (error)
		goto out_trans_cancel;

	xfs_inode_set_cowblocks_tag(ip);
	error = xfs_trans_commit(tp);
	if (error)
		return error;

convert:
	return xfs_reflink_convert_unwritten(ip, imap, cmap, convert_now);

out_trans_cancel:
	xfs_trans_cancel(tp);
	return error;
}
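
/*
 * The reservation above is sized to the cowextsize hint, not to the bare
 * request.  With hypothetical numbers: a request at offset_fsb 13 for 5
 * blocks and a hint of 8 rounds outward to the aligned range [8, 24), so
 * resaligned is 16 blocks and the transaction reserves enough space to
 * allocate and map all of them.
 */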

static int
xfs_reflink_fill_delalloc(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*imap,
	struct xfs_bmbt_irec	*cmap,
	bool			*shared,
	uint			*lockmode,
	bool			convert_now)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			nimaps;
	int			error;
	bool			found;

	do {
		xfs_iunlock(ip, *lockmode);
		*lockmode = 0;

		error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, 0, 0,
				false, &tp);
		if (error)
			return error;

		*lockmode = XFS_ILOCK_EXCL;

		error = xfs_find_trim_cow_extent(ip, imap, cmap, shared,
				&found);
		if (error || !*shared)
			goto out_trans_cancel;

		if (found) {
			xfs_trans_cancel(tp);
			break;
		}

		ASSERT(isnullstartblock(cmap->br_startblock) ||
		       cmap->br_startblock == DELAYSTARTBLOCK);

		/*
		 * Replace delalloc reservation with an unwritten extent.
		 */
		nimaps = 1;
		error = xfs_bmapi_write(tp, ip, cmap->br_startoff,
				cmap->br_blockcount,
				XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC, 0,
				cmap, &nimaps);
		if (error)
			goto out_trans_cancel;

		xfs_inode_set_cowblocks_tag(ip);
		error = xfs_trans_commit(tp);
		if (error)
			return error;
	} while (cmap->br_startoff + cmap->br_blockcount <= imap->br_startoff);

	return xfs_reflink_convert_unwritten(ip, imap, cmap, convert_now);

out_trans_cancel:
	xfs_trans_cancel(tp);
	return error;
}

/* Allocate all CoW reservations covering a range of blocks in a file. */
int
xfs_reflink_allocate_cow(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*imap,
	struct xfs_bmbt_irec	*cmap,
	bool			*shared,
	uint			*lockmode,
	bool			convert_now)
{
	int			error;
	bool			found;

	xfs_assert_ilocked(ip, XFS_ILOCK_EXCL);
	if (!ip->i_cowfp) {
		ASSERT(!xfs_is_reflink_inode(ip));
		xfs_ifork_init_cow(ip);
	}

	error = xfs_find_trim_cow_extent(ip, imap, cmap, shared, &found);
	if (error || !*shared)
		return error;

	/* CoW fork has a real extent */
	if (found)
		return xfs_reflink_convert_unwritten(ip, imap, cmap,
				convert_now);

	/*
	 * CoW fork does not have an extent and data extent is shared.
	 * Allocate a real extent in the CoW fork.
	 */
	if (cmap->br_startoff > imap->br_startoff)
		return xfs_reflink_fill_cow_hole(ip, imap, cmap, shared,
				lockmode, convert_now);

	/*
	 * CoW fork has a delalloc reservation. Replace it with a real extent.
	 * There may or may not be a data fork mapping.
	 */
	if (isnullstartblock(cmap->br_startblock) ||
	    cmap->br_startblock == DELAYSTARTBLOCK)
		return xfs_reflink_fill_delalloc(ip, imap, cmap, shared,
				lockmode, convert_now);

	/* Shouldn't get here. */
	ASSERT(0);
	return -EFSCORRUPTED;
}
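
/*
 * To summarize the dispatch above for a shared mapping: a CoW fork hole
 * (cmap starting beyond imap) is filled by xfs_reflink_fill_cow_hole(), an
 * existing delalloc reservation is replaced by xfs_reflink_fill_delalloc(),
 * and a real staging extent skips allocation entirely and is only converted
 * to written if the caller asked for that via convert_now.
 */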

/*
 * Cancel CoW reservations for some block range of an inode.
 *
 * If cancel_real is true this function cancels all COW fork extents for the
 * inode; if cancel_real is false, real extents are not cleared.
 *
 * Caller must have already joined the inode to the current transaction. The
 * inode will be joined to the transaction returned to the caller.
 */
int
xfs_reflink_cancel_cow_blocks(
	struct xfs_inode		*ip,
	struct xfs_trans		**tpp,
	xfs_fileoff_t			offset_fsb,
	xfs_fileoff_t			end_fsb,
	bool				cancel_real)
{
	struct xfs_ifork		*ifp = xfs_ifork_ptr(ip, XFS_COW_FORK);
	struct xfs_bmbt_irec		got, del;
	struct xfs_iext_cursor		icur;
	int				error = 0;

	if (!xfs_inode_has_cow_data(ip))
		return 0;
	if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
		return 0;

	/* Walk backwards until we're out of the I/O range... */
	while (got.br_startoff + got.br_blockcount > offset_fsb) {
		del = got;
		xfs_trim_extent(&del, offset_fsb, end_fsb - offset_fsb);

		/* Extent delete may have bumped ext forward */
		if (!del.br_blockcount) {
			xfs_iext_prev(ifp, &icur);
			goto next_extent;
		}

		trace_xfs_reflink_cancel_cow(ip, &del);

		if (isnullstartblock(del.br_startblock)) {
			xfs_bmap_del_extent_delay(ip, XFS_COW_FORK, &icur, &got,
					&del);
		} else if (del.br_state == XFS_EXT_UNWRITTEN || cancel_real) {
			ASSERT((*tpp)->t_highest_agno == NULLAGNUMBER);

			/* Free the CoW orphan record. */
			xfs_refcount_free_cow_extent(*tpp, del.br_startblock,
					del.br_blockcount);

			error = xfs_free_extent_later(*tpp, del.br_startblock,
					del.br_blockcount, NULL,
					XFS_AG_RESV_NONE, 0);
			if (error)
				break;

			/* Roll the transaction */
			error = xfs_defer_finish(tpp);
			if (error)
				break;

			/* Remove the mapping from the CoW fork. */
			xfs_bmap_del_extent_cow(ip, &icur, &got, &del);

			/* Remove the quota reservation */
			xfs_quota_unreserve_blkres(ip, del.br_blockcount);
		} else {
			/* Didn't do anything, push cursor back. */
			xfs_iext_prev(ifp, &icur);
		}
next_extent:
		if (!xfs_iext_get_extent(ifp, &icur, &got))
			break;
	}

	/* clear tag if cow fork is emptied */
	if (!ifp->if_bytes)
		xfs_inode_clear_cowblocks_tag(ip);
	return error;
}

/*
 * Cancel CoW reservations for some byte range of an inode.
 *
 * If cancel_real is true this function cancels all COW fork extents for the
 * inode; if cancel_real is false, real extents are not cleared.
 */
int
xfs_reflink_cancel_cow_range(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		count,
	bool			cancel_real)
{
	struct xfs_trans	*tp;
	xfs_fileoff_t		offset_fsb;
	xfs_fileoff_t		end_fsb;
	int			error;

	trace_xfs_reflink_cancel_cow_range(ip, offset, count);
	ASSERT(ip->i_cowfp);

	offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
	if (count == NULLFILEOFF)
		end_fsb = NULLFILEOFF;
	else
		end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count);

	/* Start a rolling transaction to remove the mappings */
	error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_write,
			0, 0, 0, &tp);
	if (error)
		goto out;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/* Scrape out the old CoW reservations */
	error = xfs_reflink_cancel_cow_blocks(ip, &tp, offset_fsb, end_fsb,
			cancel_real);
	if (error)
		goto out_cancel;

	error = xfs_trans_commit(tp);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
	trace_xfs_reflink_cancel_cow_range_error(ip, error, _RET_IP_);
	return error;
}

/*
 * Remap part of the CoW fork into the data fork.
 *
 * We aim to remap the range starting at @offset_fsb and ending at @end_fsb
 * into the data fork; this function will remap what it can (at the start of
 * the range) and update @offset_fsb appropriately.  Each remap gets its own
 * transaction because we can end up merging and splitting bmbt blocks for
 * every remap operation and we'd like to keep the block reservation
 * requirements as low as possible.
 */
STATIC int
xfs_reflink_end_cow_extent(
	struct xfs_inode	*ip,
	xfs_fileoff_t		*offset_fsb,
	xfs_fileoff_t		end_fsb)
{
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	got, del, data;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_COW_FORK);
	unsigned int		resblks;
	int			nmaps;
	int			error;

	resblks = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0,
			XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	/*
	 * Lock the inode.  We have to ijoin without automatic unlock because
	 * the lead transaction is the refcountbt record deletion; the data
	 * fork update follows as a deferred log item.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * In the case of racing, overlapping AIO writes, no COW extents
	 * might be left by the time I/O completes for the loser of the
	 * race.  In that case we are done.
	 */
	if (!xfs_iext_lookup_extent(ip, ifp, *offset_fsb, &icur, &got) ||
	    got.br_startoff >= end_fsb) {
		*offset_fsb = end_fsb;
		goto out_cancel;
	}

	/*
	 * Only remap real extents that contain data.  With AIO, speculative
	 * preallocations can leak into the range we are called upon, and we
	 * need to skip them.  Preserve @got for the eventual CoW fork
	 * deletion; from now on @del represents the mapping that we're
	 * actually remapping.
	 */
	while (!xfs_bmap_is_written_extent(&got)) {
		if (!xfs_iext_next_extent(ifp, &icur, &got) ||
		    got.br_startoff >= end_fsb) {
			*offset_fsb = end_fsb;
			goto out_cancel;
		}
	}
	del = got;
	xfs_trim_extent(&del, *offset_fsb, end_fsb - *offset_fsb);

	error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK,
			XFS_IEXT_REFLINK_END_COW_CNT);
	if (error)
		goto out_cancel;

	/* Grab the corresponding mapping in the data fork. */
	nmaps = 1;
	error = xfs_bmapi_read(ip, del.br_startoff, del.br_blockcount, &data,
			&nmaps, 0);
	if (error)
		goto out_cancel;

	/* We can only remap the smaller of the two extent sizes. */
	data.br_blockcount = min(data.br_blockcount, del.br_blockcount);
	del.br_blockcount = data.br_blockcount;

	trace_xfs_reflink_cow_remap_from(ip, &del);
	trace_xfs_reflink_cow_remap_to(ip, &data);

	if (xfs_bmap_is_real_extent(&data)) {
		/*
		 * If the extent we're remapping is backed by storage (written
		 * or not), unmap the extent and drop its refcount.
		 */
		xfs_bmap_unmap_extent(tp, ip, XFS_DATA_FORK, &data);
		xfs_refcount_decrease_extent(tp, &data);
		xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT,
				-data.br_blockcount);
	} else if (data.br_startblock == DELAYSTARTBLOCK) {
		int		done;

		/*
		 * If the extent we're remapping is a delalloc reservation,
		 * we can use the regular bunmapi function to release the
		 * incore state.  Dropping the delalloc reservation takes care
		 * of the quota reservation for us.
		 */
		error = xfs_bunmapi(NULL, ip, data.br_startoff,
				data.br_blockcount, 0, 1, &done);
		if (error)
			goto out_cancel;
		ASSERT(done);
	}

	/* Free the CoW orphan record. */
	xfs_refcount_free_cow_extent(tp, del.br_startblock, del.br_blockcount);

	/* Map the new blocks into the data fork. */
	xfs_bmap_map_extent(tp, ip, XFS_DATA_FORK, &del);

	/* Charge this new data fork mapping to the on-disk quota. */
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_DELBCOUNT,
			(long)del.br_blockcount);

	/* Remove the mapping from the CoW fork. */
	xfs_bmap_del_extent_cow(ip, &icur, &got, &del);

	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	if (error)
		return error;

	/* Update the caller about how much progress we made. */
	*offset_fsb = del.br_startoff + del.br_blockcount;
	return 0;

out_cancel:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Remap parts of a file's data fork after a successful CoW.
 */
int
xfs_reflink_end_cow(
	struct xfs_inode		*ip,
	xfs_off_t			offset,
	xfs_off_t			count)
{
	xfs_fileoff_t			offset_fsb;
	xfs_fileoff_t			end_fsb;
	int				error = 0;

	trace_xfs_reflink_end_cow(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
	end_fsb = XFS_B_TO_FSB(ip->i_mount, offset + count);

	/*
	 * Walk forwards until we've remapped the I/O range.  The loop function
	 * repeatedly cycles the ILOCK to allocate one transaction per remapped
	 * extent.
	 *
	 * If we're being called by writeback then the pages will still
	 * have PageWriteback set, which prevents races with reflink remapping
	 * and truncate.  Reflink remapping prevents races with writeback by
	 * taking the iolock and mmaplock before flushing the pages and
	 * remapping, which means there won't be any further writeback or page
	 * cache dirtying until the reflink completes.
	 *
	 * We should never have two threads issuing writeback for the same file
	 * region.  There are also post-eof checks in the writeback
	 * preparation code so that we don't bother writing out pages that are
	 * about to be truncated.
	 *
	 * If we're being called as part of directio write completion, the dio
	 * count is still elevated, which reflink and truncate will wait for.
	 * Reflink remapping takes the iolock and mmaplock and waits for
	 * pending dio to finish, which should prevent any directio until the
	 * remap completes.  Multiple concurrent directio writes to the same
	 * region are handled by end_cow processing only occurring for the
	 * threads which succeed; the outcome of multiple overlapping direct
	 * writes is not well defined anyway.
	 *
	 * It's possible that a buffered write and a direct write could collide
	 * here (the buffered write stumbles in after the dio flushes and
	 * invalidates the page cache and immediately queues writeback), but we
	 * have never supported this 100%.  If either disk write succeeds the
	 * blocks will be remapped.
	 */
	while (end_fsb > offset_fsb && !error)
		error = xfs_reflink_end_cow_extent(ip, &offset_fsb, end_fsb);

	if (error)
		trace_xfs_reflink_end_cow_error(ip, error, _RET_IP_);
	return error;
}

/*
 * Free all CoW staging blocks that are still referenced by the ondisk refcount
 * metadata.  The ondisk metadata does not track which inode created the
 * staging extent, so callers must ensure that there are no cached inodes with
 * live CoW staging extents.
 */
int
xfs_reflink_recover_cow(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;
	int			error = 0;

	if (!xfs_has_reflink(mp))
		return 0;

	for_each_perag(mp, agno, pag) {
		error = xfs_refcount_recover_cow_leftovers(mp, pag);
		if (error) {
			xfs_perag_rele(pag);
			break;
		}
	}

	return error;
}

/*
 * Reflinking (Block) Ranges of Two Files Together
 *
 * First, ensure that the reflink flag is set on both inodes.  The flag is an
 * optimization to avoid unnecessary refcount btree lookups in the write path.
 *
 * Now we can iteratively remap the range of extents (and holes) in src to the
 * corresponding ranges in dest.  Let drange and srange denote the ranges of
 * logical blocks in dest and src touched by the reflink operation.
 *
 * While the length of drange is greater than zero,
 *    - Read src's bmbt at the start of srange ("imap")
 *    - If imap doesn't exist, make imap appear to start at the end of srange
 *      with zero length.
 *    - If imap starts before srange, advance imap to start at srange.
 *    - If imap goes beyond srange, truncate imap to end at the end of srange.
 *    - Punch (imap start - srange start + imap len) blocks from dest at
 *      offset (drange start).
 *    - If imap points to a real range of pblks,
 *         > Increase the refcount of the imap's pblks
 *         > Map imap's pblks into dest at the offset
 *           (drange start + imap start - srange start)
 *    - Advance drange and srange by (imap start - srange start + imap len)
 *
 * Finally, if the reflink made dest longer, update both the in-core and
 * on-disk file sizes.
 *
 * ASCII Art Demonstration:
 *
 * Let's say we want to reflink this source file:
 *
 * ----SSSSSSS-SSSSS----SSSSSS (src file)
 *   <-------------------->
 *
 * into this destination file:
 *
 * --DDDDDDDDDDDDDDDDDDD--DDD (dest file)
 *        <-------------------->
 * '-' means a hole, and 'S' and 'D' are written blocks in the src and dest.
 * Observe that the range has different logical offsets in either file.
 *
 * Consider that the first extent in the source file doesn't line up with our
 * reflink range.  Unmapping and remapping are separate operations, so we can
 * unmap more blocks from the destination file than we remap.
 *
 * ----SSSSSSS-SSSSS----SSSSSS
 *   <------->
 * --DDDDD---------DDDDD--DDD
 *        <------->
 *
 * Now remap the source extent into the destination file:
 *
 * ----SSSSSSS-SSSSS----SSSSSS
 *   <------->
 * --DDDDD--SSSSSSSDDDDD--DDD
 *        <------->
 *
 * Do likewise with the second hole and extent in our range.  Holes in the
 * unmap range don't affect our operation.
 *
 * ----SSSSSSS-SSSSS----SSSSSS
 *            <---->
 * --DDDDD--SSSSSSS-SSSSS-DDD
 *                 <---->
 *
 * Finally, unmap and remap part of the third extent.  This will increase the
 * size of the destination file.
 *
 * ----SSSSSSS-SSSSS----SSSSSS
 *                  <----->
 * --DDDDD--SSSSSSS-SSSSS----SSS
 *                       <----->
 *
 * Once we update the destination file's i_size, we're done.
 */

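/*
 * A condensed sketch of the loop described above, as implemented by
 * xfs_reflink_remap_blocks() below (locking, trimming, and error handling
 * elided; illustration only):
 *
 *	while (len > 0) {
 *		error = xfs_bmapi_read(src, srcoff, len, &imap, &nimaps, 0);
 *		imap.br_startoff = destoff;	// retarget into dest
 *		error = xfs_reflink_remap_extent(dest, &imap, new_isize);
 *		srcoff += imap.br_blockcount;
 *		destoff += imap.br_blockcount;
 *		len -= imap.br_blockcount;
 *	}
 */
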
/*
 * Ensure the reflink bit is set in both inodes.
 */
STATIC int
xfs_reflink_set_inode_flag(
	struct xfs_inode	*src,
	struct xfs_inode	*dest)
{
	struct xfs_mount	*mp = src->i_mount;
	int			error;
	struct xfs_trans	*tp;

	if (xfs_is_reflink_inode(src) && xfs_is_reflink_inode(dest))
		return 0;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		goto out_error;

	/* Lock both files against IO */
	if (src->i_ino == dest->i_ino)
		xfs_ilock(src, XFS_ILOCK_EXCL);
	else
		xfs_lock_two_inodes(src, XFS_ILOCK_EXCL, dest, XFS_ILOCK_EXCL);

	if (!xfs_is_reflink_inode(src)) {
		trace_xfs_reflink_set_inode_flag(src);
		xfs_trans_ijoin(tp, src, XFS_ILOCK_EXCL);
		src->i_diflags2 |= XFS_DIFLAG2_REFLINK;
		xfs_trans_log_inode(tp, src, XFS_ILOG_CORE);
		xfs_ifork_init_cow(src);
	} else
		xfs_iunlock(src, XFS_ILOCK_EXCL);

	if (src->i_ino == dest->i_ino)
		goto commit_flags;

	if (!xfs_is_reflink_inode(dest)) {
		trace_xfs_reflink_set_inode_flag(dest);
		xfs_trans_ijoin(tp, dest, XFS_ILOCK_EXCL);
		dest->i_diflags2 |= XFS_DIFLAG2_REFLINK;
		xfs_trans_log_inode(tp, dest, XFS_ILOG_CORE);
		xfs_ifork_init_cow(dest);
	} else
		xfs_iunlock(dest, XFS_ILOCK_EXCL);

commit_flags:
	error = xfs_trans_commit(tp);
	if (error)
		goto out_error;
	return error;

out_error:
	trace_xfs_reflink_set_inode_flag_error(dest, error, _RET_IP_);
	return error;
}

/*
 * Update destination inode size & cowextsize hint, if necessary.
 */
int
xfs_reflink_update_dest(
	struct xfs_inode	*dest,
	xfs_off_t		newlen,
	xfs_extlen_t		cowextsize,
	unsigned int		remap_flags)
{
	struct xfs_mount	*mp = dest->i_mount;
	struct xfs_trans	*tp;
	int			error;

	if (newlen <= i_size_read(VFS_I(dest)) && cowextsize == 0)
		return 0;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
	if (error)
		goto out_error;

	xfs_ilock(dest, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, dest, XFS_ILOCK_EXCL);

	if (newlen > i_size_read(VFS_I(dest))) {
		trace_xfs_reflink_update_inode_size(dest, newlen);
		i_size_write(VFS_I(dest), newlen);
		dest->i_disk_size = newlen;
	}

	if (cowextsize) {
		dest->i_cowextsize = cowextsize;
		dest->i_diflags2 |= XFS_DIFLAG2_COWEXTSIZE;
	}

	xfs_trans_log_inode(tp, dest, XFS_ILOG_CORE);

	error = xfs_trans_commit(tp);
	if (error)
		goto out_error;
	return error;

out_error:
	trace_xfs_reflink_update_inode_size_error(dest, error, _RET_IP_);
	return error;
}

/*
 * Do we have enough reserve in this AG to handle a reflink?  The refcount
 * btree already reserved all the space it needs, but the rmap btree can grow
 * infinitely, so we won't allow more reflinks when the AG is down to the
 * btree reserves.
 */
static int
xfs_reflink_ag_has_free_space(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_perag	*pag;
	int			error = 0;

	if (!xfs_has_rmapbt(mp))
		return 0;

	pag = xfs_perag_get(mp, agno);
	if (xfs_ag_resv_critical(pag, XFS_AG_RESV_RMAPBT) ||
	    xfs_ag_resv_critical(pag, XFS_AG_RESV_METADATA))
		error = -ENOSPC;
	xfs_perag_put(pag);
	return error;
}

/*
 * Remap the given extent into the file.  The dmap blockcount will be set to
 * the number of blocks that were actually remapped.
 */
STATIC int
xfs_reflink_remap_extent(
	struct xfs_inode	*ip,
	struct xfs_bmbt_irec	*dmap,
	xfs_off_t		new_isize)
{
	struct xfs_bmbt_irec	smap;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	xfs_off_t		newlen;
	int64_t			qdelta = 0;
	unsigned int		resblks;
	bool			quota_reserved = true;
	bool			smap_real;
	bool			dmap_written = xfs_bmap_is_written_extent(dmap);
	int			iext_delta = 0;
	int			nimaps;
	int			error;

	/*
	 * Start a rolling transaction to switch the mappings.
	 *
	 * Adding a written extent to the extent map can cause a bmbt split,
	 * and removing a mapped extent from the extent map can cause a bmbt
	 * split.  The two operations cannot both cause a split since they
	 * operate on the same index in the bmap btree, so we only need a
	 * reservation for one bmbt split if either thing is happening.
	 * However, we haven't locked the inode yet, so we reserve assuming
	 * this is the case.
	 *
	 * The first allocation call tries to reserve enough space to handle
	 * mapping dmap into a sparse part of the file plus the bmbt split.  We
	 * haven't locked the inode or read the existing mapping yet, so we do
	 * not know for sure that we need the space.  This should succeed most
	 * of the time.
	 *
	 * If the first attempt fails, try again but reserving only enough
	 * space to handle a bmbt split.  This is the hard minimum requirement,
	 * and we revisit quota reservations later when we know more about what
	 * we're remapping.
	 */
	resblks = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK);
	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write,
			resblks + dmap->br_blockcount, 0, false, &tp);
	if (error == -EDQUOT || error == -ENOSPC) {
		quota_reserved = false;
		error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write,
				resblks, 0, false, &tp);
	}
	if (error)
		goto out;

	/*
	 * Read what's currently mapped in the destination file into smap.
	 * If smap isn't a hole, we will have to remove it before we can add
	 * dmap to the destination file.
	 */
	nimaps = 1;
	error = xfs_bmapi_read(ip, dmap->br_startoff, dmap->br_blockcount,
			&smap, &nimaps, 0);
	if (error)
		goto out_cancel;
	ASSERT(nimaps == 1 && smap.br_startoff == dmap->br_startoff);
	smap_real = xfs_bmap_is_real_extent(&smap);

	/*
	 * We can only remap as many blocks as the smaller of the two extent
	 * maps, because we can only remap one extent at a time.
	 */
	dmap->br_blockcount = min(dmap->br_blockcount, smap.br_blockcount);
	ASSERT(dmap->br_blockcount == smap.br_blockcount);

	trace_xfs_reflink_remap_extent_dest(ip, &smap);

	/*
	 * Two extents mapped to the same physical block must not have
	 * different states; that's filesystem corruption.  Move on to the next
	 * extent if they're both holes or both the same physical extent.
	 */
	if (dmap->br_startblock == smap.br_startblock) {
		if (dmap->br_state != smap.br_state) {
			xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
			error = -EFSCORRUPTED;
		}
		goto out_cancel;
	}

	/* If both extents are unwritten, leave them alone. */
	if (dmap->br_state == XFS_EXT_UNWRITTEN &&
	    smap.br_state == XFS_EXT_UNWRITTEN)
		goto out_cancel;

	/* No reflinking if the AG of the dest mapping is low on space. */
	if (dmap_written) {
		error = xfs_reflink_ag_has_free_space(mp,
				XFS_FSB_TO_AGNO(mp, dmap->br_startblock));
		if (error)
			goto out_cancel;
	}

	/*
	 * Increase quota reservation if we think the quota block counter for
	 * this file could increase.
	 *
	 * If we are mapping a written extent into the file, we need to have
	 * enough quota block count reservation to handle the blocks in that
	 * extent.  We log only the delta to the quota block counts, so if the
	 * extent we're unmapping also has blocks allocated to it, we don't
	 * need a quota reservation for the extent itself.
	 *
	 * Note that if we're replacing a delalloc reservation with a written
	 * extent, we have to take the full quota reservation because removing
	 * the delalloc reservation gives the block count back to the quota
	 * count.  This is suboptimal, but the VFS flushed the dest range
	 * before we started.  That should have removed all the delalloc
	 * reservations, but we code defensively.
	 *
	 * xfs_trans_alloc_inode above already tried to grab an even larger
	 * quota reservation, and kicked off a blockgc scan if it couldn't.
	 * If we can't get a potentially smaller quota reservation now, we're
	 * done.
	 */
	if (!quota_reserved && !smap_real && dmap_written) {
		error = xfs_trans_reserve_quota_nblks(tp, ip,
				dmap->br_blockcount, 0, false);
		if (error)
			goto out_cancel;
	}

	if (smap_real)
		++iext_delta;

	if (dmap_written)
		++iext_delta;

	error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK, iext_delta);
	if (error)
		goto out_cancel;

	if (smap_real) {
		/*
		 * If the extent we're unmapping is backed by storage (written
		 * or not), unmap the extent and drop its refcount.
		 */
		xfs_bmap_unmap_extent(tp, ip, XFS_DATA_FORK, &smap);
		xfs_refcount_decrease_extent(tp, &smap);
		qdelta -= smap.br_blockcount;
	} else if (smap.br_startblock == DELAYSTARTBLOCK) {
		int		done;

		/*
		 * If the extent we're unmapping is a delalloc reservation,
		 * we can use the regular bunmapi function to release the
		 * incore state.  Dropping the delalloc reservation takes care
		 * of the quota reservation for us.
		 */
		error = xfs_bunmapi(NULL, ip, smap.br_startoff,
				smap.br_blockcount, 0, 1, &done);
		if (error)
			goto out_cancel;
		ASSERT(done);
	}

	/*
	 * If the extent we're sharing is backed by written storage, increase
	 * its refcount and map it into the file.
	 */
	if (dmap_written) {
		xfs_refcount_increase_extent(tp, dmap);
		xfs_bmap_map_extent(tp, ip, XFS_DATA_FORK, dmap);
		qdelta += dmap->br_blockcount;
	}

	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, qdelta);

	/* Update dest isize if needed. */
	newlen = XFS_FSB_TO_B(mp, dmap->br_startoff + dmap->br_blockcount);
	newlen = min_t(xfs_off_t, newlen, new_isize);
	if (newlen > i_size_read(VFS_I(ip))) {
		trace_xfs_reflink_update_inode_size(ip, newlen);
		i_size_write(VFS_I(ip), newlen);
		ip->i_disk_size = newlen;
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	}

	/* Commit everything and unlock. */
	error = xfs_trans_commit(tp);
	goto out_unlock;

out_cancel:
	xfs_trans_cancel(tp);
out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
	if (error)
		trace_xfs_reflink_remap_extent_error(ip, error, _RET_IP_);
	return error;
}

/* Remap a range of one file to the other. */
int
xfs_reflink_remap_blocks(
	struct xfs_inode	*src,
	loff_t			pos_in,
	struct xfs_inode	*dest,
	loff_t			pos_out,
	loff_t			remap_len,
	loff_t			*remapped)
{
	struct xfs_bmbt_irec	imap;
	struct xfs_mount	*mp = src->i_mount;
	xfs_fileoff_t		srcoff = XFS_B_TO_FSBT(mp, pos_in);
	xfs_fileoff_t		destoff = XFS_B_TO_FSBT(mp, pos_out);
	xfs_filblks_t		len;
	xfs_filblks_t		remapped_len = 0;
	xfs_off_t		new_isize = pos_out + remap_len;
	int			nimaps;
	int			error = 0;

	len = min_t(xfs_filblks_t, XFS_B_TO_FSB(mp, remap_len),
			XFS_MAX_FILEOFF);

	trace_xfs_reflink_remap_blocks(src, srcoff, len, dest, destoff);

	while (len > 0) {
		unsigned int	lock_mode;

		/* Read extent from the source file */
		nimaps = 1;
		lock_mode = xfs_ilock_data_map_shared(src);
		error = xfs_bmapi_read(src, srcoff, len, &imap, &nimaps, 0);
		xfs_iunlock(src, lock_mode);
		if (error)
			break;
		/*
		 * The caller supposedly flushed all dirty pages in the source
		 * file range, which means that writeback should have allocated
		 * or deleted all delalloc reservations in that range.  If we
		 * find one, that's a good sign that something is seriously
		 * wrong here.
		 */
		ASSERT(nimaps == 1 && imap.br_startoff == srcoff);
		if (imap.br_startblock == DELAYSTARTBLOCK) {
			ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
			xfs_bmap_mark_sick(src, XFS_DATA_FORK);
			error = -EFSCORRUPTED;
			break;
		}

		trace_xfs_reflink_remap_extent_src(src, &imap);

		/* Remap into the destination file at the given offset. */
		imap.br_startoff = destoff;
		error = xfs_reflink_remap_extent(dest, &imap, new_isize);
		if (error)
			break;

		if (fatal_signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* Advance drange/srange */
		srcoff += imap.br_blockcount;
		destoff += imap.br_blockcount;
		len -= imap.br_blockcount;
		remapped_len += imap.br_blockcount;
		cond_resched();
	}

	if (error)
		trace_xfs_reflink_remap_blocks_error(dest, error, _RET_IP_);
	*remapped = min_t(loff_t, remap_len,
			  XFS_FSB_TO_B(src->i_mount, remapped_len));
	return error;
}

/*
 * If we're reflinking to a point past the destination file's EOF, we must
 * zero any speculative post-EOF preallocations that sit between the old EOF
 * and the destination file offset.
 */
static int
xfs_reflink_zero_posteof(
	struct xfs_inode	*ip,
	loff_t			pos)
{
	loff_t			isize = i_size_read(VFS_I(ip));

	if (pos <= isize)
		return 0;

	trace_xfs_zero_eof(ip, isize, pos - isize);
	return xfs_zero_range(ip, isize, pos - isize, NULL);
}

/*
 * Prepare two files for range cloning.  Upon a successful return both inodes
 * will have the iolock and mmaplock held, the page cache of the out file will
 * be truncated, and any leases on the out file will have been broken.  This
 * function borrows heavily from xfs_file_aio_write_checks.
 *
 * The VFS allows partial EOF blocks to "match" for dedupe even though it
 * hasn't checked that the bytes beyond EOF physically match.  Hence we cannot
 * use the EOF block in the source dedupe range because it's not a complete
 * block match, and so it can introduce a corruption into the file that has
 * its block replaced.
 *
 * In similar fashion, the VFS file cloning also allows partial EOF blocks to
 * be "block aligned" for the purposes of cloning entire files.  However, if
 * the source file range includes the EOF block and it lands within the
 * existing EOF of the destination file, then we can expose stale data from
 * beyond the source file EOF in the destination file.
 *
 * XFS doesn't support partial block sharing, so in both cases we have to
 * check these cases ourselves.  For dedupe, we can simply round the length
 * to dedupe down to the previous whole block and ignore the partial EOF
 * block.  While this means we can't dedupe the last block of a file, this is
 * an acceptable tradeoff for simplicity of implementation.
 *
 * For cloning, we want to share the partial EOF block if it is also the new
 * EOF block of the destination file.  If the partial EOF block lies inside
 * the existing destination EOF, then we have to abort the clone to avoid
 * exposing stale data in the destination file.  Hence we reject these clone
 * attempts with -EINVAL in this case.
 */
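/*
 * For example, with hypothetical numbers: given a 4096-byte block size and
 * a 10000-byte source file, a whole-file dedupe request is rounded down to
 * 8192 bytes so the partial EOF block never participates in the match.  A
 * clone of the same range is allowed only if its end lands at or beyond the
 * destination's EOF, so that the partial block becomes the destination's
 * new EOF block rather than a window onto stale data.
 */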
int
xfs_reflink_remap_prep(
	struct file		*file_in,
	loff_t			pos_in,
	struct file		*file_out,
	loff_t			pos_out,
	loff_t			*len,
	unsigned int		remap_flags)
{
	struct inode		*inode_in = file_inode(file_in);
	struct xfs_inode	*src = XFS_I(inode_in);
	struct inode		*inode_out = file_inode(file_out);
	struct xfs_inode	*dest = XFS_I(inode_out);
	int			ret;

	/* Lock both files against IO */
	ret = xfs_ilock2_io_mmap(src, dest);
	if (ret)
		return ret;

	/* Check file eligibility and prepare for block sharing. */
	ret = -EINVAL;
	/* Don't reflink realtime inodes */
	if (XFS_IS_REALTIME_INODE(src) || XFS_IS_REALTIME_INODE(dest))
		goto out_unlock;

	/* Don't share DAX file data with non-DAX file. */
	if (IS_DAX(inode_in) != IS_DAX(inode_out))
		goto out_unlock;

	if (!IS_DAX(inode_in))
		ret = generic_remap_file_range_prep(file_in, pos_in, file_out,
				pos_out, len, remap_flags);
	else
		ret = dax_remap_file_range_prep(file_in, pos_in, file_out,
				pos_out, len, remap_flags, &xfs_read_iomap_ops);
	if (ret || *len == 0)
		goto out_unlock;

	/* Attach dquots to dest inode before changing block map */
	ret = xfs_qm_dqattach(dest);
	if (ret)
		goto out_unlock;

	/*
	 * Zero existing post-eof speculative preallocations in the destination
	 * file.
	 */
	ret = xfs_reflink_zero_posteof(dest, pos_out);
	if (ret)
		goto out_unlock;

	/* Set flags and remap blocks. */
	ret = xfs_reflink_set_inode_flag(src, dest);
	if (ret)
		goto out_unlock;

	/*
	 * If pos_out > EOF, we may have dirtied blocks between EOF and
	 * pos_out. In that case, we need to extend the flush and unmap to cover
	 * from EOF to the end of the copy length.
	 */
	if (pos_out > XFS_ISIZE(dest)) {
		loff_t	flen = *len + (pos_out - XFS_ISIZE(dest));
		ret = xfs_flush_unmap_range(dest, XFS_ISIZE(dest), flen);
	} else {
		ret = xfs_flush_unmap_range(dest, pos_out, *len);
	}
	if (ret)
		goto out_unlock;

	xfs_iflags_set(src, XFS_IREMAPPING);
	if (inode_in != inode_out)
		xfs_ilock_demote(src, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);

	return 0;
out_unlock:
	xfs_iunlock2_io_mmap(src, dest);
	return ret;
}

/* Does this inode need the reflink flag? */
int
xfs_reflink_inode_has_shared_extents(
	struct xfs_trans		*tp,
	struct xfs_inode		*ip,
	bool				*has_shared)
{
	struct xfs_bmbt_irec		got;
	struct xfs_mount		*mp = ip->i_mount;
	struct xfs_ifork		*ifp;
	struct xfs_iext_cursor		icur;
	bool				found;
	int				error;

	ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
	error = xfs_iread_extents(tp, ip, XFS_DATA_FORK);
	if (error)
		return error;

	*has_shared = false;
	found = xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got);
	while (found) {
		struct xfs_perag	*pag;
		xfs_agblock_t		agbno;
		xfs_extlen_t		aglen;
		xfs_agblock_t		rbno;
		xfs_extlen_t		rlen;

		if (isnullstartblock(got.br_startblock) ||
		    got.br_state != XFS_EXT_NORM)
			goto next;

		pag = xfs_perag_get(mp, XFS_FSB_TO_AGNO(mp, got.br_startblock));
		agbno = XFS_FSB_TO_AGBNO(mp, got.br_startblock);
		aglen = got.br_blockcount;
		error = xfs_reflink_find_shared(pag, tp, agbno, aglen,
				&rbno, &rlen, false);
		xfs_perag_put(pag);
		if (error)
			return error;

		/* Is there still a shared block here? */
		if (rbno != NULLAGBLOCK) {
			*has_shared = true;
			return 0;
		}
next:
		found = xfs_iext_next_extent(ifp, &icur, &got);
	}

	return 0;
}

/*
 * Clear the inode reflink flag if there are no shared extents.
 *
 * The caller is responsible for joining the inode to the transaction passed
 * in.  The inode will be joined to the transaction that is returned to the
 * caller.
 */
int
xfs_reflink_clear_inode_flag(
	struct xfs_inode	*ip,
	struct xfs_trans	**tpp)
{
	bool			needs_flag;
	int			error = 0;

	ASSERT(xfs_is_reflink_inode(ip));

	error = xfs_reflink_inode_has_shared_extents(*tpp, ip, &needs_flag);
	if (error || needs_flag)
		return error;

	/*
	 * We didn't find any shared blocks so turn off the reflink flag.
	 * First, get rid of any leftover CoW mappings.
	 */
	error = xfs_reflink_cancel_cow_blocks(ip, tpp, 0, XFS_MAX_FILEOFF,
			true);
	if (error)
		return error;

	/* Clear the inode flag. */
	trace_xfs_reflink_unset_inode_flag(ip);
	ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
	xfs_inode_clear_cowblocks_tag(ip);
	xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE);

	return error;
}

/*
 * Clear the inode reflink flag if there are no shared extents and the size
 * hasn't changed.
 */
STATIC int
xfs_reflink_try_clear_inode_flag(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error = 0;

	/* Start a rolling transaction to remove the mappings */
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, 0, 0, 0, &tp);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	error = xfs_reflink_clear_inode_flag(ip, &tp);
	if (error)
		goto cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out;

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;
cancel:
	xfs_trans_cancel(tp);
out:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

/*
 * Pre-COW all shared blocks within a given byte range of a file and turn off
 * the reflink flag if we unshare all of the file's blocks.
 */
int
xfs_reflink_unshare(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	struct inode		*inode = VFS_I(ip);
	int			error;

	if (!xfs_is_reflink_inode(ip))
		return 0;

	trace_xfs_reflink_unshare(ip, offset, len);

	inode_dio_wait(inode);

	if (IS_DAX(inode))
		error = dax_file_unshare(inode, offset, len,
				&xfs_dax_write_iomap_ops);
	else
		error = iomap_file_unshare(inode, offset, len,
				&xfs_buffered_write_iomap_ops);
	if (error)
		goto out;

	error = filemap_write_and_wait_range(inode->i_mapping, offset,
			offset + len - 1);
	if (error)
		goto out;

	/* Turn off the reflink flag if possible. */
	error = xfs_reflink_try_clear_inode_flag(ip);
	if (error)
		goto out;
	return 0;

out:
	trace_xfs_reflink_unshare_error(ip, error, _RET_IP_);
	return error;
}