xref: /linux/fs/xfs/xfs_trans.c (revision 06bd48b6cd97ef3889b68c8e09014d81dbc463f1)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
4  * Copyright (C) 2010 Red Hat, Inc.
5  * All Rights Reserved.
6  */
7 #include "xfs.h"
8 #include "xfs_fs.h"
9 #include "xfs_shared.h"
10 #include "xfs_format.h"
11 #include "xfs_log_format.h"
12 #include "xfs_trans_resv.h"
13 #include "xfs_mount.h"
14 #include "xfs_extent_busy.h"
15 #include "xfs_quota.h"
16 #include "xfs_trans.h"
17 #include "xfs_trans_priv.h"
18 #include "xfs_log.h"
19 #include "xfs_trace.h"
20 #include "xfs_error.h"
21 #include "xfs_defer.h"
22 
23 kmem_zone_t	*xfs_trans_zone;
24 
25 #if defined(CONFIG_TRACEPOINTS)
26 static void
27 xfs_trans_trace_reservations(
28 	struct xfs_mount	*mp)
29 {
30 	struct xfs_trans_res	resv;
31 	struct xfs_trans_res	*res;
32 	struct xfs_trans_res	*end_res;
33 	int			i;
34 
35 	res = (struct xfs_trans_res *)M_RES(mp);
36 	end_res = (struct xfs_trans_res *)(M_RES(mp) + 1);
37 	for (i = 0; res < end_res; i++, res++)
38 		trace_xfs_trans_resv_calc(mp, i, res);
39 	xfs_log_get_max_trans_res(mp, &resv);
40 	trace_xfs_trans_resv_calc(mp, -1, &resv);
41 }
42 #else
43 # define xfs_trans_trace_reservations(mp)
44 #endif
45 
46 /*
47  * Initialize the precomputed transaction reservation values
48  * in the mount structure.
49  */
50 void
51 xfs_trans_init(
52 	struct xfs_mount	*mp)
53 {
54 	xfs_trans_resv_calc(mp, M_RES(mp));
55 	xfs_trans_trace_reservations(mp);
56 }
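
/*
 * Editor's sketch (not part of the original file): after xfs_trans_init()
 * has precomputed the reservations, callers pull them out of M_RES(mp)
 * when allocating transactions, e.g.:
 *
 *	struct xfs_trans_res	*tres = &M_RES(mp)->tr_write;
 *
 *	error = xfs_trans_alloc(mp, tres, blocks, 0, 0, &tp);
 */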
57 
58 /*
59  * Free the transaction structure.  If there is more clean up
60  * to do when the structure is freed, add it here.
61  */
62 STATIC void
63 xfs_trans_free(
64 	struct xfs_trans	*tp)
65 {
66 	xfs_extent_busy_sort(&tp->t_busy);
67 	xfs_extent_busy_clear(tp->t_mountp, &tp->t_busy, false);
68 
69 	trace_xfs_trans_free(tp, _RET_IP_);
70 	atomic_dec(&tp->t_mountp->m_active_trans);
71 	if (!(tp->t_flags & XFS_TRANS_NO_WRITECOUNT))
72 		sb_end_intwrite(tp->t_mountp->m_super);
73 	xfs_trans_free_dqinfo(tp);
74 	kmem_cache_free(xfs_trans_zone, tp);
75 }
76 
77 /*
78  * This is called to create a new transaction which will share the
79  * permanent log reservation of the given transaction.  The remaining
80  * unused block and rt extent reservations are also inherited.  This
81  * implies that the original transaction is no longer allowed to allocate
82  * blocks.  Locks and log items, however, are not inherited.  They must
83  * be added to the new transaction explicitly.
84  */
85 STATIC struct xfs_trans *
86 xfs_trans_dup(
87 	struct xfs_trans	*tp)
88 {
89 	struct xfs_trans	*ntp;
90 
91 	trace_xfs_trans_dup(tp, _RET_IP_);
92 
93 	ntp = kmem_zone_zalloc(xfs_trans_zone, 0);
94 
95 	/*
96 	 * Initialize the new transaction structure.
97 	 */
98 	ntp->t_magic = XFS_TRANS_HEADER_MAGIC;
99 	ntp->t_mountp = tp->t_mountp;
100 	INIT_LIST_HEAD(&ntp->t_items);
101 	INIT_LIST_HEAD(&ntp->t_busy);
102 	INIT_LIST_HEAD(&ntp->t_dfops);
103 	ntp->t_firstblock = NULLFSBLOCK;
104 
105 	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
106 	ASSERT(tp->t_ticket != NULL);
107 
108 	ntp->t_flags = XFS_TRANS_PERM_LOG_RES |
109 		       (tp->t_flags & XFS_TRANS_RESERVE) |
110 		       (tp->t_flags & XFS_TRANS_NO_WRITECOUNT);
111 	/* We gave our writer reference to the new transaction */
112 	tp->t_flags |= XFS_TRANS_NO_WRITECOUNT;
113 	ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket);
114 
115 	ASSERT(tp->t_blk_res >= tp->t_blk_res_used);
116 	ntp->t_blk_res = tp->t_blk_res - tp->t_blk_res_used;
117 	tp->t_blk_res = tp->t_blk_res_used;
118 
119 	ntp->t_rtx_res = tp->t_rtx_res - tp->t_rtx_res_used;
120 	tp->t_rtx_res = tp->t_rtx_res_used;
121 	ntp->t_pflags = tp->t_pflags;
122 
123 	/* move deferred ops over to the new tp */
124 	xfs_defer_move(ntp, tp);
125 
126 	xfs_trans_dup_dqinfo(tp, ntp);
127 
128 	atomic_inc(&tp->t_mountp->m_active_trans);
129 	return ntp;
130 }
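
/*
 * Editor's worked example (not from the original file): if the old
 * transaction had t_blk_res = 100 with t_blk_res_used = 30, then after
 * xfs_trans_dup() the new transaction gets t_blk_res = 70 (the unused
 * part) and the old one keeps exactly its 30 used blocks, so no block
 * is ever reserved twice.
 */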
131 
132 /*
133  * This is called to reserve free disk blocks and log space for the
134  * given transaction.  This must be done before allocating any resources
135  * within the transaction.
136  *
137  * This will return -ENOSPC if there are not enough blocks available.
138  * It will sleep waiting for available log space.
139  * Long running transactions obtain a permanent log reservation by
140  * setting XFS_TRANS_PERM_LOG_RES in resp->tr_logflags.  If any one of
141  * the reservations fails then they will all be backed out.
142  *
143  * This does not do quota reservations. That typically is done by the
144  * caller afterwards.
145  */
146 static int
147 xfs_trans_reserve(
148 	struct xfs_trans	*tp,
149 	struct xfs_trans_res	*resp,
150 	uint			blocks,
151 	uint			rtextents)
152 {
153 	int		error = 0;
154 	bool		rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
155 
156 	/* Mark this thread as being in a transaction */
157 	current_set_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
158 
159 	/*
160 	 * Attempt to reserve the needed disk blocks by decrementing
161 	 * the number needed from the number available.  This will
162 	 * fail if the count would go below zero.
163 	 */
164 	if (blocks > 0) {
165 		error = xfs_mod_fdblocks(tp->t_mountp, -((int64_t)blocks), rsvd);
166 		if (error != 0) {
167 			current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
168 			return -ENOSPC;
169 		}
170 		tp->t_blk_res += blocks;
171 	}
172 
173 	/*
174 	 * Reserve the log space needed for this transaction.
175 	 */
176 	if (resp->tr_logres > 0) {
177 		bool	permanent = false;
178 
179 		ASSERT(tp->t_log_res == 0 ||
180 		       tp->t_log_res == resp->tr_logres);
181 		ASSERT(tp->t_log_count == 0 ||
182 		       tp->t_log_count == resp->tr_logcount);
183 
184 		if (resp->tr_logflags & XFS_TRANS_PERM_LOG_RES) {
185 			tp->t_flags |= XFS_TRANS_PERM_LOG_RES;
186 			permanent = true;
187 		} else {
188 			ASSERT(tp->t_ticket == NULL);
189 			ASSERT(!(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
190 		}
191 
192 		if (tp->t_ticket != NULL) {
193 			ASSERT(resp->tr_logflags & XFS_TRANS_PERM_LOG_RES);
194 			error = xfs_log_regrant(tp->t_mountp, tp->t_ticket);
195 		} else {
196 			error = xfs_log_reserve(tp->t_mountp,
197 						resp->tr_logres,
198 						resp->tr_logcount,
199 						&tp->t_ticket, XFS_TRANSACTION,
200 						permanent);
201 		}
202 
203 		if (error)
204 			goto undo_blocks;
205 
206 		tp->t_log_res = resp->tr_logres;
207 		tp->t_log_count = resp->tr_logcount;
208 	}
209 
210 	/*
211 	 * Attempt to reserve the needed realtime extents by decrementing
212 	 * the number needed from the number available.  This will
213 	 * fail if the count would go below zero.
214 	 */
215 	if (rtextents > 0) {
216 		error = xfs_mod_frextents(tp->t_mountp, -((int64_t)rtextents));
217 		if (error) {
218 			error = -ENOSPC;
219 			goto undo_log;
220 		}
221 		tp->t_rtx_res += rtextents;
222 	}
223 
224 	return 0;
225 
226 	/*
227 	 * Error cases jump to one of these labels to undo any
228 	 * reservations which have already been performed.
229 	 */
230 undo_log:
231 	if (resp->tr_logres > 0) {
232 		xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, false);
233 		tp->t_ticket = NULL;
234 		tp->t_log_res = 0;
235 		tp->t_flags &= ~XFS_TRANS_PERM_LOG_RES;
236 	}
237 
238 undo_blocks:
239 	if (blocks > 0) {
240 		xfs_mod_fdblocks(tp->t_mountp, (int64_t)blocks, rsvd);
241 		tp->t_blk_res = 0;
242 	}
243 
244 	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
245 
246 	return error;
247 }
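
/*
 * Editor's note (sketch, not from the original file): the log reservation
 * above takes one of two paths.  On the first reservation t_ticket is
 * NULL, so a new ticket is acquired via xfs_log_reserve().  When
 * xfs_trans_roll() re-reserves a duplicated permanent transaction, the
 * ticket was inherited from the old transaction, so only
 * xfs_log_regrant() is needed; this mirrors xfs_trans_roll() below:
 *
 *	tres.tr_logres = tp->t_log_res;
 *	tres.tr_logcount = tp->t_log_count;
 *	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
 *	error = xfs_trans_reserve(ntp, &tres, 0, 0);
 */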
248 
249 int
250 xfs_trans_alloc(
251 	struct xfs_mount	*mp,
252 	struct xfs_trans_res	*resp,
253 	uint			blocks,
254 	uint			rtextents,
255 	uint			flags,
256 	struct xfs_trans	**tpp)
257 {
258 	struct xfs_trans	*tp;
259 	int			error;
260 
261 	/*
262 	 * Allocate the handle before we do our freeze accounting and set up the
263 	 * GFP_NOFS allocation context, so that we avoid lockdep false positives
264 	 * by doing GFP_KERNEL allocations inside sb_start_intwrite().
265 	 */
266 	tp = kmem_zone_zalloc(xfs_trans_zone, 0);
267 	if (!(flags & XFS_TRANS_NO_WRITECOUNT))
268 		sb_start_intwrite(mp->m_super);
269 
270 	/*
271 	 * Zero-reservation ("empty") transactions can't modify anything, so
272 	 * they're allowed to run while we're frozen.
273 	 */
274 	WARN_ON(resp->tr_logres > 0 &&
275 		mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
276 	atomic_inc(&mp->m_active_trans);
277 
278 	tp->t_magic = XFS_TRANS_HEADER_MAGIC;
279 	tp->t_flags = flags;
280 	tp->t_mountp = mp;
281 	INIT_LIST_HEAD(&tp->t_items);
282 	INIT_LIST_HEAD(&tp->t_busy);
283 	INIT_LIST_HEAD(&tp->t_dfops);
284 	tp->t_firstblock = NULLFSBLOCK;
285 
286 	error = xfs_trans_reserve(tp, resp, blocks, rtextents);
287 	if (error) {
288 		xfs_trans_cancel(tp);
289 		return error;
290 	}
291 
292 	trace_xfs_trans_alloc(tp, _RET_IP_);
293 
294 	*tpp = tp;
295 	return 0;
296 }
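
/*
 * Editor's sketch (not part of the original file): the canonical caller
 * pattern, with a hypothetical inode "ip" and the tr_ichange reservation
 * chosen purely for illustration:
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
 *	if (error)
 *		return error;
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 *	...				// modify the inode
 *	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 *	return xfs_trans_commit(tp);	// commit also unlocks the inode
 */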
297 
298 /*
299  * Create an empty transaction with no reservation.  This is a defensive
300  * mechanism for routines that query metadata without actually modifying
301  * them -- if the metadata being queried is somehow cross-linked (think a
302  * btree block pointer that points higher in the tree), we risk deadlock.
303  * However, blocks grabbed as part of a transaction can be re-grabbed.
304  * The verifiers will notice the corrupt block and the operation will fail
305  * back to userspace without deadlocking.
306  *
307  * Note the zero-length reservation; this transaction MUST be cancelled
308  * without any dirty data.
309  *
310  * Callers should obtain freeze protection to avoid two conflicts with fs
311  * freezing: (1) having active transactions trip the m_active_trans ASSERTs;
312  * and (2) grabbing buffers at the same time that freeze is trying to drain
313  * the buffer LRU list.
314  */
315 int
316 xfs_trans_alloc_empty(
317 	struct xfs_mount		*mp,
318 	struct xfs_trans		**tpp)
319 {
320 	struct xfs_trans_res		resv = {0};
321 
322 	return xfs_trans_alloc(mp, &resv, 0, 0, XFS_TRANS_NO_WRITECOUNT, tpp);
323 }
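
/*
 * Editor's sketch (not part of the original file): typical usage for a
 * read-only metadata walk; the query helper name is hypothetical:
 *
 *	error = xfs_trans_alloc_empty(mp, &tp);
 *	if (error)
 *		return error;
 *	error = some_metadata_query(tp, ...);	// reads buffers via tp
 *	xfs_trans_cancel(tp);			// must still be clean
 *	return error;
 */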
324 
325 /*
326  * Record the indicated change to the given field for application
327  * to the file system's superblock when the transaction commits.
328  * For now, just store the change in the transaction structure.
329  *
330  * Mark the transaction structure to indicate that the superblock
331  * needs to be updated before committing.
332  *
333  * Because we may not be keeping track of allocated/free inodes and
334  * used filesystem blocks in the superblock, we do not mark the
335  * superblock dirty in this transaction if we modify these fields.
336  * We still need to update the transaction deltas so that they get
337  * applied to the incore superblock, but we don't want them to
338  * cause the superblock to get locked and logged if these are the
339  * only fields in the superblock that the transaction modifies.
340  */
341 void
342 xfs_trans_mod_sb(
343 	xfs_trans_t	*tp,
344 	uint		field,
345 	int64_t		delta)
346 {
347 	uint32_t	flags = (XFS_TRANS_DIRTY|XFS_TRANS_SB_DIRTY);
348 	xfs_mount_t	*mp = tp->t_mountp;
349 
350 	switch (field) {
351 	case XFS_TRANS_SB_ICOUNT:
352 		tp->t_icount_delta += delta;
353 		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
354 			flags &= ~XFS_TRANS_SB_DIRTY;
355 		break;
356 	case XFS_TRANS_SB_IFREE:
357 		tp->t_ifree_delta += delta;
358 		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
359 			flags &= ~XFS_TRANS_SB_DIRTY;
360 		break;
361 	case XFS_TRANS_SB_FDBLOCKS:
362 		/*
363 		 * Track the number of blocks allocated in the transaction.
364 		 * Make sure it does not exceed the number reserved. If so,
365 		 * shutdown as this can lead to accounting inconsistency.
366 		 */
367 		if (delta < 0) {
368 			tp->t_blk_res_used += (uint)-delta;
369 			if (tp->t_blk_res_used > tp->t_blk_res)
370 				xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
371 		}
372 		tp->t_fdblocks_delta += delta;
373 		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
374 			flags &= ~XFS_TRANS_SB_DIRTY;
375 		break;
376 	case XFS_TRANS_SB_RES_FDBLOCKS:
377 		/*
378 		 * The allocation has already been applied to the
379 		 * in-core superblock's counter.  This should only
380 		 * be applied to the on-disk superblock.
381 		 */
382 		tp->t_res_fdblocks_delta += delta;
383 		if (xfs_sb_version_haslazysbcount(&mp->m_sb))
384 			flags &= ~XFS_TRANS_SB_DIRTY;
385 		break;
386 	case XFS_TRANS_SB_FREXTENTS:
387 		/*
388 		 * Track the number of blocks allocated in the
389 		 * transaction.  Make sure it does not exceed the
390 		 * number reserved.
391 		 */
392 		if (delta < 0) {
393 			tp->t_rtx_res_used += (uint)-delta;
394 			ASSERT(tp->t_rtx_res_used <= tp->t_rtx_res);
395 		}
396 		tp->t_frextents_delta += delta;
397 		break;
398 	case XFS_TRANS_SB_RES_FREXTENTS:
399 		/*
400 		 * The allocation has already been applied to the
401 		 * in-core superblock's counter.  This should only
402 		 * be applied to the on-disk superblock.
403 		 */
404 		ASSERT(delta < 0);
405 		tp->t_res_frextents_delta += delta;
406 		break;
407 	case XFS_TRANS_SB_DBLOCKS:
408 		ASSERT(delta > 0);
409 		tp->t_dblocks_delta += delta;
410 		break;
411 	case XFS_TRANS_SB_AGCOUNT:
412 		ASSERT(delta > 0);
413 		tp->t_agcount_delta += delta;
414 		break;
415 	case XFS_TRANS_SB_IMAXPCT:
416 		tp->t_imaxpct_delta += delta;
417 		break;
418 	case XFS_TRANS_SB_REXTSIZE:
419 		tp->t_rextsize_delta += delta;
420 		break;
421 	case XFS_TRANS_SB_RBMBLOCKS:
422 		tp->t_rbmblocks_delta += delta;
423 		break;
424 	case XFS_TRANS_SB_RBLOCKS:
425 		tp->t_rblocks_delta += delta;
426 		break;
427 	case XFS_TRANS_SB_REXTENTS:
428 		tp->t_rextents_delta += delta;
429 		break;
430 	case XFS_TRANS_SB_REXTSLOG:
431 		tp->t_rextslog_delta += delta;
432 		break;
433 	default:
434 		ASSERT(0);
435 		return;
436 	}
437 
438 	tp->t_flags |= flags;
439 }
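
/*
 * Editor's sketch (not part of the original file): a caller that has just
 * allocated "len" blocks against its reservation records the change as a
 * negative fdblocks delta ("len" is a hypothetical variable):
 *
 *	xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, -(int64_t)len);
 *
 * With lazy superblock counters this only updates the incore counters at
 * commit time; without them it also dirties the on-disk superblock.
 */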
440 
441 /*
442  * xfs_trans_apply_sb_deltas() is called from the commit code
443  * to bring the superblock buffer into the current transaction
444  * and modify it as requested by earlier calls to xfs_trans_mod_sb().
445  *
446  * For now we just look at each field allowed to change and change
447  * it if necessary.
448  */
449 STATIC void
450 xfs_trans_apply_sb_deltas(
451 	xfs_trans_t	*tp)
452 {
453 	xfs_dsb_t	*sbp;
454 	xfs_buf_t	*bp;
455 	int		whole = 0;
456 
457 	bp = xfs_trans_getsb(tp, tp->t_mountp);
458 	sbp = bp->b_addr;
459 
460 	/*
461 	 * Check that superblock mods match the mods made to AGF counters.
462 	 */
463 	ASSERT((tp->t_fdblocks_delta + tp->t_res_fdblocks_delta) ==
464 	       (tp->t_ag_freeblks_delta + tp->t_ag_flist_delta +
465 		tp->t_ag_btree_delta));
466 
467 	/*
468 	 * Only update the superblock counters if we are logging them
469 	 */
470 	if (!xfs_sb_version_haslazysbcount(&(tp->t_mountp->m_sb))) {
471 		if (tp->t_icount_delta)
472 			be64_add_cpu(&sbp->sb_icount, tp->t_icount_delta);
473 		if (tp->t_ifree_delta)
474 			be64_add_cpu(&sbp->sb_ifree, tp->t_ifree_delta);
475 		if (tp->t_fdblocks_delta)
476 			be64_add_cpu(&sbp->sb_fdblocks, tp->t_fdblocks_delta);
477 		if (tp->t_res_fdblocks_delta)
478 			be64_add_cpu(&sbp->sb_fdblocks, tp->t_res_fdblocks_delta);
479 	}
480 
481 	if (tp->t_frextents_delta)
482 		be64_add_cpu(&sbp->sb_frextents, tp->t_frextents_delta);
483 	if (tp->t_res_frextents_delta)
484 		be64_add_cpu(&sbp->sb_frextents, tp->t_res_frextents_delta);
485 
486 	if (tp->t_dblocks_delta) {
487 		be64_add_cpu(&sbp->sb_dblocks, tp->t_dblocks_delta);
488 		whole = 1;
489 	}
490 	if (tp->t_agcount_delta) {
491 		be32_add_cpu(&sbp->sb_agcount, tp->t_agcount_delta);
492 		whole = 1;
493 	}
494 	if (tp->t_imaxpct_delta) {
495 		sbp->sb_imax_pct += tp->t_imaxpct_delta;
496 		whole = 1;
497 	}
498 	if (tp->t_rextsize_delta) {
499 		be32_add_cpu(&sbp->sb_rextsize, tp->t_rextsize_delta);
500 		whole = 1;
501 	}
502 	if (tp->t_rbmblocks_delta) {
503 		be32_add_cpu(&sbp->sb_rbmblocks, tp->t_rbmblocks_delta);
504 		whole = 1;
505 	}
506 	if (tp->t_rblocks_delta) {
507 		be64_add_cpu(&sbp->sb_rblocks, tp->t_rblocks_delta);
508 		whole = 1;
509 	}
510 	if (tp->t_rextents_delta) {
511 		be64_add_cpu(&sbp->sb_rextents, tp->t_rextents_delta);
512 		whole = 1;
513 	}
514 	if (tp->t_rextslog_delta) {
515 		sbp->sb_rextslog += tp->t_rextslog_delta;
516 		whole = 1;
517 	}
518 
519 	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SB_BUF);
520 	if (whole)
521 		/*
522 		 * Log the whole thing, the fields are noncontiguous.
523 		 */
524 		xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_dsb_t) - 1);
525 	else
526 		/*
527 		 * Since all the modifiable fields are contiguous, we
528 		 * can get away with this.
529 		 */
530 		xfs_trans_log_buf(tp, bp, offsetof(xfs_dsb_t, sb_icount),
531 				  offsetof(xfs_dsb_t, sb_frextents) +
532 				  sizeof(sbp->sb_frextents) - 1);
533 }
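
/*
 * Editor's note (sketch, not from the original file): the "else" branch
 * works because sb_icount, sb_ifree, sb_fdblocks and sb_frextents sit
 * next to each other in the on-disk superblock, so the dirty byte range
 * is just those four 64-bit counters:
 *
 *	first = offsetof(xfs_dsb_t, sb_icount);
 *	last  = offsetof(xfs_dsb_t, sb_frextents) +
 *		sizeof(sbp->sb_frextents) - 1;	// 4 x 8 bytes spanned
 */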
534 
535 STATIC int
536 xfs_sb_mod8(
537 	uint8_t			*field,
538 	int8_t			delta)
539 {
540 	int8_t			counter = *field;
541 
542 	counter += delta;
543 	if (counter < 0) {
544 		ASSERT(0);
545 		return -EINVAL;
546 	}
547 	*field = counter;
548 	return 0;
549 }
550 
551 STATIC int
552 xfs_sb_mod32(
553 	uint32_t		*field,
554 	int32_t			delta)
555 {
556 	int32_t			counter = *field;
557 
558 	counter += delta;
559 	if (counter < 0) {
560 		ASSERT(0);
561 		return -EINVAL;
562 	}
563 	*field = counter;
564 	return 0;
565 }
566 
567 STATIC int
568 xfs_sb_mod64(
569 	uint64_t		*field,
570 	int64_t			delta)
571 {
572 	int64_t			counter = *field;
573 
574 	counter += delta;
575 	if (counter < 0) {
576 		ASSERT(0);
577 		return -EINVAL;
578 	}
579 	*field = counter;
580 	return 0;
581 }
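
/*
 * Editor's worked example (not from the original file): these helpers
 * reject any change that would drive a counter negative.  E.g. with a
 * hypothetical counter value of 5:
 *
 *	uint32_t	counter = 5;
 *
 *	xfs_sb_mod32(&counter, -8);	// returns -EINVAL, counter stays 5
 *	xfs_sb_mod32(&counter, -5);	// returns 0, counter becomes 0
 */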
582 
583 /*
584  * xfs_trans_unreserve_and_mod_sb() is called to release unused reservations
585  * and apply superblock counter changes to the in-core superblock.  The
586  * t_res_fdblocks_delta and t_res_frextents_delta fields are explicitly NOT
587  * applied to the in-core superblock.  The idea is that those deltas have
588  * already been applied.
589  *
590  * If we are not logging superblock counters, then the inode allocated/free and
591  * used block counts are not updated in the on disk superblock. In this case,
592  * XFS_TRANS_SB_DIRTY will not be set when the transaction is updated but we
593  * still need to update the incore superblock with the changes.
594  */
595 void
596 xfs_trans_unreserve_and_mod_sb(
597 	struct xfs_trans	*tp)
598 {
599 	struct xfs_mount	*mp = tp->t_mountp;
600 	bool			rsvd = (tp->t_flags & XFS_TRANS_RESERVE) != 0;
601 	int64_t			blkdelta = 0;
602 	int64_t			rtxdelta = 0;
603 	int64_t			idelta = 0;
604 	int64_t			ifreedelta = 0;
605 	int			error;
606 
607 	/* calculate deltas */
608 	if (tp->t_blk_res > 0)
609 		blkdelta = tp->t_blk_res;
610 	if ((tp->t_fdblocks_delta != 0) &&
611 	    (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
612 	     (tp->t_flags & XFS_TRANS_SB_DIRTY)))
613 		blkdelta += tp->t_fdblocks_delta;
614 
615 	if (tp->t_rtx_res > 0)
616 		rtxdelta = tp->t_rtx_res;
617 	if ((tp->t_frextents_delta != 0) &&
618 	    (tp->t_flags & XFS_TRANS_SB_DIRTY))
619 		rtxdelta += tp->t_frextents_delta;
620 
621 	if (xfs_sb_version_haslazysbcount(&mp->m_sb) ||
622 	     (tp->t_flags & XFS_TRANS_SB_DIRTY)) {
623 		idelta = tp->t_icount_delta;
624 		ifreedelta = tp->t_ifree_delta;
625 	}
626 
627 	/* apply the per-cpu counters */
628 	if (blkdelta) {
629 		error = xfs_mod_fdblocks(mp, blkdelta, rsvd);
630 		if (error)
631 			goto out;
632 	}
633 
634 	if (idelta) {
635 		error = xfs_mod_icount(mp, idelta);
636 		if (error)
637 			goto out_undo_fdblocks;
638 	}
639 
640 	if (ifreedelta) {
641 		error = xfs_mod_ifree(mp, ifreedelta);
642 		if (error)
643 			goto out_undo_icount;
644 	}
645 
646 	if (rtxdelta == 0 && !(tp->t_flags & XFS_TRANS_SB_DIRTY))
647 		return;
648 
649 	/* apply remaining deltas */
650 	spin_lock(&mp->m_sb_lock);
651 	if (rtxdelta) {
652 		error = xfs_sb_mod64(&mp->m_sb.sb_frextents, rtxdelta);
653 		if (error)
654 			goto out_undo_ifree;
655 	}
656 
657 	if (tp->t_dblocks_delta != 0) {
658 		error = xfs_sb_mod64(&mp->m_sb.sb_dblocks, tp->t_dblocks_delta);
659 		if (error)
660 			goto out_undo_frextents;
661 	}
662 	if (tp->t_agcount_delta != 0) {
663 		error = xfs_sb_mod32(&mp->m_sb.sb_agcount, tp->t_agcount_delta);
664 		if (error)
665 			goto out_undo_dblocks;
666 	}
667 	if (tp->t_imaxpct_delta != 0) {
668 		error = xfs_sb_mod8(&mp->m_sb.sb_imax_pct, tp->t_imaxpct_delta);
669 		if (error)
670 			goto out_undo_agcount;
671 	}
672 	if (tp->t_rextsize_delta != 0) {
673 		error = xfs_sb_mod32(&mp->m_sb.sb_rextsize,
674 				     tp->t_rextsize_delta);
675 		if (error)
676 			goto out_undo_imaxpct;
677 	}
678 	if (tp->t_rbmblocks_delta != 0) {
679 		error = xfs_sb_mod32(&mp->m_sb.sb_rbmblocks,
680 				     tp->t_rbmblocks_delta);
681 		if (error)
682 			goto out_undo_rextsize;
683 	}
684 	if (tp->t_rblocks_delta != 0) {
685 		error = xfs_sb_mod64(&mp->m_sb.sb_rblocks, tp->t_rblocks_delta);
686 		if (error)
687 			goto out_undo_rbmblocks;
688 	}
689 	if (tp->t_rextents_delta != 0) {
690 		error = xfs_sb_mod64(&mp->m_sb.sb_rextents,
691 				     tp->t_rextents_delta);
692 		if (error)
693 			goto out_undo_rblocks;
694 	}
695 	if (tp->t_rextslog_delta != 0) {
696 		error = xfs_sb_mod8(&mp->m_sb.sb_rextslog,
697 				     tp->t_rextslog_delta);
698 		if (error)
699 			goto out_undo_rextents;
700 	}
701 	spin_unlock(&mp->m_sb_lock);
702 	return;
703 
704 out_undo_rextents:
705 	if (tp->t_rextents_delta)
706 		xfs_sb_mod64(&mp->m_sb.sb_rextents, -tp->t_rextents_delta);
707 out_undo_rblocks:
708 	if (tp->t_rblocks_delta)
709 		xfs_sb_mod64(&mp->m_sb.sb_rblocks, -tp->t_rblocks_delta);
710 out_undo_rbmblocks:
711 	if (tp->t_rbmblocks_delta)
712 		xfs_sb_mod32(&mp->m_sb.sb_rbmblocks, -tp->t_rbmblocks_delta);
713 out_undo_rextsize:
714 	if (tp->t_rextsize_delta)
715 		xfs_sb_mod32(&mp->m_sb.sb_rextsize, -tp->t_rextsize_delta);
716 out_undo_imaxpct:
717 	if (tp->t_imaxpct_delta)
718 		xfs_sb_mod8(&mp->m_sb.sb_imax_pct, -tp->t_imaxpct_delta);
719 out_undo_agcount:
720 	if (tp->t_agcount_delta)
721 		xfs_sb_mod32(&mp->m_sb.sb_agcount, -tp->t_agcount_delta);
722 out_undo_dblocks:
723 	if (tp->t_dblocks_delta)
724 		xfs_sb_mod64(&mp->m_sb.sb_dblocks, -tp->t_dblocks_delta);
725 out_undo_frextents:
726 	if (rtxdelta)
727 		xfs_sb_mod64(&mp->m_sb.sb_frextents, -rtxdelta);
728 out_undo_ifree:
729 	spin_unlock(&mp->m_sb_lock);
730 	if (ifreedelta)
731 		xfs_mod_ifree(mp, -ifreedelta);
732 out_undo_icount:
733 	if (idelta)
734 		xfs_mod_icount(mp, -idelta);
735 out_undo_fdblocks:
736 	if (blkdelta)
737 		xfs_mod_fdblocks(mp, -blkdelta, rsvd);
738 out:
739 	ASSERT(error == 0);
740 	return;
741 }
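
/*
 * Editor's worked example (not from the original file): with lazy
 * superblock counters, a transaction that reserved 20 blocks
 * (t_blk_res = 20) and allocated 6 of them (t_fdblocks_delta = -6)
 * computes blkdelta = 20 + (-6) = 14, returning the 14 unused blocks
 * to the free space counter via xfs_mod_fdblocks().
 */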
742 
743 /* Add the given log item to the transaction's list of log items. */
744 void
745 xfs_trans_add_item(
746 	struct xfs_trans	*tp,
747 	struct xfs_log_item	*lip)
748 {
749 	ASSERT(lip->li_mountp == tp->t_mountp);
750 	ASSERT(lip->li_ailp == tp->t_mountp->m_ail);
751 	ASSERT(list_empty(&lip->li_trans));
752 	ASSERT(!test_bit(XFS_LI_DIRTY, &lip->li_flags));
753 
754 	list_add_tail(&lip->li_trans, &tp->t_items);
755 	trace_xfs_trans_add_item(tp, _RET_IP_);
756 }
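
/*
 * Editor's note (sketch, not from the original file): callers rarely use
 * xfs_trans_add_item() directly; the join helpers do it for them.  For
 * example, xfs_trans_ijoin() attaches an inode's log item roughly like:
 *
 *	xfs_trans_add_item(tp, &iip->ili_item);
 */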
757 
758 /*
759  * Unlink the log item from the transaction.  The log item is no longer
760  * considered dirty in this transaction, as the linked transaction has
761  * finished, either by abort or commit completion.
762  */
763 void
764 xfs_trans_del_item(
765 	struct xfs_log_item	*lip)
766 {
767 	clear_bit(XFS_LI_DIRTY, &lip->li_flags);
768 	list_del_init(&lip->li_trans);
769 }
770 
771 /* Detach and unlock all of the items in a transaction */
772 static void
773 xfs_trans_free_items(
774 	struct xfs_trans	*tp,
775 	bool			abort)
776 {
777 	struct xfs_log_item	*lip, *next;
778 
779 	trace_xfs_trans_free_items(tp, _RET_IP_);
780 
781 	list_for_each_entry_safe(lip, next, &tp->t_items, li_trans) {
782 		xfs_trans_del_item(lip);
783 		if (abort)
784 			set_bit(XFS_LI_ABORTED, &lip->li_flags);
785 		if (lip->li_ops->iop_release)
786 			lip->li_ops->iop_release(lip);
787 	}
788 }
789 
790 static inline void
791 xfs_log_item_batch_insert(
792 	struct xfs_ail		*ailp,
793 	struct xfs_ail_cursor	*cur,
794 	struct xfs_log_item	**log_items,
795 	int			nr_items,
796 	xfs_lsn_t		commit_lsn)
797 {
798 	int	i;
799 
800 	spin_lock(&ailp->ail_lock);
801 	/* xfs_trans_ail_update_bulk drops ailp->ail_lock */
802 	xfs_trans_ail_update_bulk(ailp, cur, log_items, nr_items, commit_lsn);
803 
804 	for (i = 0; i < nr_items; i++) {
805 		struct xfs_log_item *lip = log_items[i];
806 
807 		if (lip->li_ops->iop_unpin)
808 			lip->li_ops->iop_unpin(lip, 0);
809 	}
810 }
811 
812 /*
813  * Bulk operation version of xfs_trans_committed that takes a log vector of
814  * items to insert into the AIL. This uses bulk AIL insertion techniques to
815  * minimise lock traffic.
816  *
817  * If we are called with the aborted flag set, it is because a log write during
818  * a CIL checkpoint commit has failed. In this case, all the items in the
819  * checkpoint have already gone through iop_committed and iop_committing, which
820  * means that checkpoint commit abort handling is treated exactly the same
821  * as an iclog write error even though we haven't started any IO yet. Hence in
822  * this case all we need to do is iop_committed processing, followed by an
823  * iop_unpin(aborted) call.
824  *
825  * The AIL cursor is used to optimise the insert process. If commit_lsn is not
826  * at the end of the AIL, the insert cursor avoids the need to walk
827  * the AIL to find the insertion point on every xfs_log_item_batch_insert()
828  * call. This saves a lot of needless list walking and is a net win, even
829  * though it slightly increases the amount of AIL lock traffic to set it up
830  * and tear it down.
831  */
832 void
833 xfs_trans_committed_bulk(
834 	struct xfs_ail		*ailp,
835 	struct xfs_log_vec	*log_vector,
836 	xfs_lsn_t		commit_lsn,
837 	bool			aborted)
838 {
839 #define LOG_ITEM_BATCH_SIZE	32
840 	struct xfs_log_item	*log_items[LOG_ITEM_BATCH_SIZE];
841 	struct xfs_log_vec	*lv;
842 	struct xfs_ail_cursor	cur;
843 	int			i = 0;
844 
845 	spin_lock(&ailp->ail_lock);
846 	xfs_trans_ail_cursor_last(ailp, &cur, commit_lsn);
847 	spin_unlock(&ailp->ail_lock);
848 
849 	/* unpin all the log items */
850 	for (lv = log_vector; lv; lv = lv->lv_next) {
851 		struct xfs_log_item	*lip = lv->lv_item;
852 		xfs_lsn_t		item_lsn;
853 
854 		if (aborted)
855 			set_bit(XFS_LI_ABORTED, &lip->li_flags);
856 
857 		if (lip->li_ops->flags & XFS_ITEM_RELEASE_WHEN_COMMITTED) {
858 			lip->li_ops->iop_release(lip);
859 			continue;
860 		}
861 
862 		if (lip->li_ops->iop_committed)
863 			item_lsn = lip->li_ops->iop_committed(lip, commit_lsn);
864 		else
865 			item_lsn = commit_lsn;
866 
867 		/* item_lsn of -1 means the item needs no further processing */
868 		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0)
869 			continue;
870 
871 		/*
872 		 * if we are aborting the operation, no point in inserting the
873 		 * object into the AIL as we are in a shutdown situation.
874 		 */
875 		if (aborted) {
876 			ASSERT(XFS_FORCED_SHUTDOWN(ailp->ail_mount));
877 			if (lip->li_ops->iop_unpin)
878 				lip->li_ops->iop_unpin(lip, 1);
879 			continue;
880 		}
881 
882 		if (item_lsn != commit_lsn) {
883 
884 			/*
885 			 * Not a bulk update option due to unusual item_lsn.
886 			 * Push into AIL immediately, rechecking the lsn once
887 			 * we have the ail lock. Then unpin the item. This does
888 			 * not affect the AIL cursor the bulk insert path is
889 			 * using.
890 			 */
891 			spin_lock(&ailp->ail_lock);
892 			if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0)
893 				xfs_trans_ail_update(ailp, lip, item_lsn);
894 			else
895 				spin_unlock(&ailp->ail_lock);
896 			if (lip->li_ops->iop_unpin)
897 				lip->li_ops->iop_unpin(lip, 0);
898 			continue;
899 		}
900 
901 		/* Item is a candidate for bulk AIL insert.  */
902 		log_items[i++] = lv->lv_item;
903 		if (i >= LOG_ITEM_BATCH_SIZE) {
904 			xfs_log_item_batch_insert(ailp, &cur, log_items,
905 					LOG_ITEM_BATCH_SIZE, commit_lsn);
906 			i = 0;
907 		}
908 	}
909 
910 	/* make sure we insert the remainder! */
911 	if (i)
912 		xfs_log_item_batch_insert(ailp, &cur, log_items, i, commit_lsn);
913 
914 	spin_lock(&ailp->ail_lock);
915 	xfs_trans_ail_cursor_done(&cur);
916 	spin_unlock(&ailp->ail_lock);
917 }
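
/*
 * Editor's worked example (not from the original file): with
 * LOG_ITEM_BATCH_SIZE = 32, a checkpoint of 70 insertable items is
 * inserted in three batches: two full batches of 32 from inside the
 * loop, plus one tail call for the remaining 6 items.
 */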
918 
919 /*
920  * Commit the given transaction to the log.
921  *
922  * The XFS disk error handling mechanism is not based on a typical
923  * transaction abort mechanism. Logically after the filesystem
924  * gets marked 'SHUTDOWN', we can't let any new transactions
925  * be durable - ie. committed to disk - because some metadata might
926  * be inconsistent. In such cases, this returns an error, and the
927  * caller may assume that all locked objects joined to the transaction
928  * have already been unlocked as if the commit had succeeded.
929  * Do not reference the transaction structure after this call.
930  */
931 static int
932 __xfs_trans_commit(
933 	struct xfs_trans	*tp,
934 	bool			regrant)
935 {
936 	struct xfs_mount	*mp = tp->t_mountp;
937 	xfs_lsn_t		commit_lsn = -1;
938 	int			error = 0;
939 	int			sync = tp->t_flags & XFS_TRANS_SYNC;
940 
941 	trace_xfs_trans_commit(tp, _RET_IP_);
942 
943 	/*
944 	 * Finish deferred items on final commit. Only permanent transactions
945 	 * should ever have deferred ops.
946 	 */
947 	WARN_ON_ONCE(!list_empty(&tp->t_dfops) &&
948 		     !(tp->t_flags & XFS_TRANS_PERM_LOG_RES));
949 	if (!regrant && (tp->t_flags & XFS_TRANS_PERM_LOG_RES)) {
950 		error = xfs_defer_finish_noroll(&tp);
951 		if (error)
952 			goto out_unreserve;
953 	}
954 
955 	/*
956 	 * If there is nothing to be logged by the transaction,
957 	 * then unlock all of the items associated with the
958 	 * transaction and free the transaction structure.
959 	 * Also make sure to return any reserved blocks to
960 	 * the free pool.
961 	 */
962 	if (!(tp->t_flags & XFS_TRANS_DIRTY))
963 		goto out_unreserve;
964 
965 	if (XFS_FORCED_SHUTDOWN(mp)) {
966 		error = -EIO;
967 		goto out_unreserve;
968 	}
969 
970 	ASSERT(tp->t_ticket != NULL);
971 
972 	/*
973 	 * If we need to update the superblock, then do it now.
974 	 */
975 	if (tp->t_flags & XFS_TRANS_SB_DIRTY)
976 		xfs_trans_apply_sb_deltas(tp);
977 	xfs_trans_apply_dquot_deltas(tp);
978 
979 	xfs_log_commit_cil(mp, tp, &commit_lsn, regrant);
980 
981 	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
982 	xfs_trans_free(tp);
983 
984 	/*
985 	 * If the transaction needs to be synchronous, then force the
986 	 * log out now and wait for it.
987 	 */
988 	if (sync) {
989 		error = xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC, NULL);
990 		XFS_STATS_INC(mp, xs_trans_sync);
991 	} else {
992 		XFS_STATS_INC(mp, xs_trans_async);
993 	}
994 
995 	return error;
996 
997 out_unreserve:
998 	xfs_trans_unreserve_and_mod_sb(tp);
999 
1000 	/*
1001 	 * It is indeed possible for the transaction to be not dirty but
1002 	 * the dqinfo portion to be.  All that means is that we have some
1003 	 * (non-persistent) quota reservations that need to be unreserved.
1004 	 */
1005 	xfs_trans_unreserve_and_mod_dquots(tp);
1006 	if (tp->t_ticket) {
1007 		commit_lsn = xfs_log_done(mp, tp->t_ticket, NULL, regrant);
1008 		if (commit_lsn == -1 && !error)
1009 			error = -EIO;
1010 		tp->t_ticket = NULL;
1011 	}
1012 	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
1013 	xfs_trans_free_items(tp, !!error);
1014 	xfs_trans_free(tp);
1015 
1016 	XFS_STATS_INC(mp, xs_trans_empty);
1017 	return error;
1018 }
1019 
1020 int
1021 xfs_trans_commit(
1022 	struct xfs_trans	*tp)
1023 {
1024 	return __xfs_trans_commit(tp, false);
1025 }
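
/*
 * Editor's sketch (not part of the original file): callers that need the
 * transaction stable on disk before proceeding mark it synchronous
 * first, which makes __xfs_trans_commit() force the log and wait:
 *
 *	xfs_trans_set_sync(tp);
 *	error = xfs_trans_commit(tp);
 */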
1026 
1027 /*
1028  * Unlock all of the transaction's items and free the transaction.
1029  * The transaction must not have modified any of its items, because
1030  * there is no way to restore them to their previous state.
1031  *
1032  * If the transaction has made a log reservation, make sure to release
1033  * it as well.
1034  */
1035 void
1036 xfs_trans_cancel(
1037 	struct xfs_trans	*tp)
1038 {
1039 	struct xfs_mount	*mp = tp->t_mountp;
1040 	bool			dirty = (tp->t_flags & XFS_TRANS_DIRTY);
1041 
1042 	trace_xfs_trans_cancel(tp, _RET_IP_);
1043 
1044 	if (tp->t_flags & XFS_TRANS_PERM_LOG_RES)
1045 		xfs_defer_cancel(tp);
1046 
1047 	/*
1048 	 * See if the caller is relying on us to shut down the
1049 	 * filesystem.  This happens in paths where we detect
1050 	 * corruption and decide to give up.
1051 	 */
1052 	if (dirty && !XFS_FORCED_SHUTDOWN(mp)) {
1053 		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
1054 		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1055 	}
1056 #ifdef DEBUG
1057 	if (!dirty && !XFS_FORCED_SHUTDOWN(mp)) {
1058 		struct xfs_log_item *lip;
1059 
1060 		list_for_each_entry(lip, &tp->t_items, li_trans)
1061 			ASSERT(!(lip->li_type == XFS_LI_EFD));
1062 	}
1063 #endif
1064 	xfs_trans_unreserve_and_mod_sb(tp);
1065 	xfs_trans_unreserve_and_mod_dquots(tp);
1066 
1067 	if (tp->t_ticket) {
1068 		xfs_log_done(mp, tp->t_ticket, NULL, false);
1069 		tp->t_ticket = NULL;
1070 	}
1071 
1072 	/* mark this thread as no longer being in a transaction */
1073 	current_restore_flags_nested(&tp->t_pflags, PF_MEMALLOC_NOFS);
1074 
1075 	xfs_trans_free_items(tp, dirty);
1076 	xfs_trans_free(tp);
1077 }
1078 
1079 /*
1080  * Roll from one trans in the sequence of PERMANENT transactions to
1081  * the next: permanent transactions are only flushed out when
1082  * committed with xfs_trans_commit(), but we still want to let chunks
1083  * of the work reach the log as soon as possible. So we commit the
1084  * chunk we've been working on and get a new transaction to continue.
1085  */
1086 int
1087 xfs_trans_roll(
1088 	struct xfs_trans	**tpp)
1089 {
1090 	struct xfs_trans	*trans = *tpp;
1091 	struct xfs_trans_res	tres;
1092 	int			error;
1093 
1094 	trace_xfs_trans_roll(trans, _RET_IP_);
1095 
1096 	/*
1097 	 * Copy the critical parameters from one trans to the next.
1098 	 */
1099 	tres.tr_logres = trans->t_log_res;
1100 	tres.tr_logcount = trans->t_log_count;
1101 
1102 	*tpp = xfs_trans_dup(trans);
1103 
1104 	/*
1105 	 * Commit the current transaction.
1106 	 * If this commit failed, then it'd just unlock those items that
1107 	 * are not marked ihold. That also means that a filesystem shutdown
1108 	 * is in progress. The caller takes the responsibility to cancel
1109 	 * the duplicate transaction that gets returned.
1110 	 */
1111 	error = __xfs_trans_commit(trans, true);
1112 	if (error)
1113 		return error;
1114 
1115 	/*
1116 	 * Reserve space in the log for the next transaction.
1117 	 * This also pushes items in the "AIL", the list of logged items,
1118 	 * out to disk if they are taking up space at the tail of the log
1119 	 * that we want to use.  This requires that either nothing be locked
1120 	 * across this call, or that anything that is locked be logged in
1121 	 * the prior and the next transactions.
1122 	 */
1123 	tres.tr_logflags = XFS_TRANS_PERM_LOG_RES;
1124 	return xfs_trans_reserve(*tpp, &tres, 0, 0);
1125 }
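
/*
 * Editor's sketch (not part of the original file): the usual rolling
 * pattern re-logs and re-joins held items around each roll; this mirrors
 * the xfs_trans_roll_inode() helper:
 *
 *	xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE);
 *	error = xfs_trans_roll(tpp);
 *	if (error)
 *		return error;
 *	xfs_trans_ijoin(*tpp, ip, 0);
 */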
1126