1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
4  * Copyright (C) 2010 Red Hat, Inc.
5  * All Rights Reserved.
6  */
7 #include "xfs.h"
8 #include "xfs_fs.h"
9 #include "xfs_shared.h"
10 #include "xfs_format.h"
11 #include "xfs_log_format.h"
12 #include "xfs_trans_resv.h"
13 #include "xfs_mount.h"
14 #include "xfs_da_format.h"
15 #include "xfs_da_btree.h"
16 #include "xfs_inode.h"
17 #include "xfs_bmap_btree.h"
18 #include "xfs_quota.h"
19 #include "xfs_trans.h"
20 #include "xfs_qm.h"
21 #include "xfs_trans_space.h"
22 #include "xfs_rtbitmap.h"
23 #include "xfs_attr_item.h"
24 #include "xfs_log.h"
26 
27 #define _ALLOC	true
28 #define _FREE	false
29 
30 /*
31  * A buffer has a format structure overhead in the log in addition
32  * to the data, so we need to take this into account when reserving
33  * space in a transaction for a buffer.  Round the space required up
34  * to a multiple of 128 bytes so that we don't change the historical
35  * reservation that has been used for this overhead.
36  */
37 STATIC uint
38 xfs_buf_log_overhead(void)
39 {
40 	return round_up(sizeof(struct xlog_op_header) +
41 			sizeof(struct xfs_buf_log_format), 128);
42 }
43 
44 /*
45  * Calculate the transaction log reservation per item in bytes.
46  *
47  * The nbufs argument is the number of items that will be changed in a
48  * transaction.  size is the number of bytes that should be reserved per
49  * item.
50  */
51 STATIC uint
52 xfs_calc_buf_res(
53 	uint		nbufs,
54 	uint		size)
55 {
56 	return nbufs * (size + xfs_buf_log_overhead());
57 }
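
/*
 * Worked example (editor's illustration, not part of the original source):
 * xfs_buf_log_overhead() rounds the op header plus buffer log format header
 * up to a multiple of 128 bytes; assuming those two structures together fit
 * in 128 bytes (true for current layouts), the per-buffer overhead is 128.
 * Reserving three sector-sized buffers on a hypothetical 512-byte sector
 * filesystem then costs:
 *
 *	xfs_calc_buf_res(3, 512) = 3 * (512 + 128) = 1920 bytes
 */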
58 
59 /*
60  * Per-extent log reservation for the btree changes involved in freeing or
61  * allocating an extent.  In classic XFS there are two trees that can be
62  * modified (bnobt + cntbt).  With rmap enabled, the rmapbt makes a third.
63  * The number of blocks reserved is based on the formula:
64  *
65  * num trees * ((2 blocks/level * max depth) - 1)
66  *
67  * Keep in mind that max depth is calculated separately for each type of tree.
68  */
69 uint
70 xfs_allocfree_block_count(
71 	struct xfs_mount *mp,
72 	uint		num_ops)
73 {
74 	uint		blocks;
75 
76 	blocks = num_ops * 2 * (2 * mp->m_alloc_maxlevels - 1);
77 	if (xfs_has_rmapbt(mp))
78 		blocks += num_ops * (2 * mp->m_rmap_maxlevels - 1);
79 
80 	return blocks;
81 }
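
/*
 * Illustrative example (the geometry here is assumed, not derived from the
 * code): suppose m_alloc_maxlevels = 5 and m_rmap_maxlevels = 6.  For
 * num_ops = 2 extents:
 *
 *	bnobt + cntbt:		2 ops * 2 trees * (2 * 5 - 1) = 36 blocks
 *	rmapbt (if enabled):	2 ops * (2 * 6 - 1)           = 22 blocks
 *
 * giving 36 blocks without rmap and 58 blocks with it.  The real maxlevels
 * values depend on the filesystem geometry computed at mount time.
 */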
82 
83 /*
84  * Per-extent log reservation for refcount btree changes.  These are never done
85  * in the same transaction as an allocation or a free, so we compute them
86  * separately.
87  */
88 static unsigned int
89 xfs_refcountbt_block_count(
90 	struct xfs_mount	*mp,
91 	unsigned int		num_ops)
92 {
93 	return num_ops * (2 * mp->m_refc_maxlevels - 1);
94 }
95 
96 /*
97  * Logging inodes is really tricksy. They are logged in memory format,
98  * which means that what we write into the log doesn't directly translate into
99  * the amount of space they use on disk.
100  *
101  * Case in point - the in-memory format of a btree fork uses more space than
102  * its on-disk format. In memory, the buffer contains a normal btree block
103  * header so the btree code can treat it as just another generic buffer.
104  * However, when we write it to the inode fork, we don't write all of this
105  * header as it isn't needed. e.g. the root is only ever in the inode, so
106  * there's no need for sibling pointers which would waste 16 bytes of space.
107  *
108  * Hence when we have an inode with a maximally sized btree format fork, the
109  * amount of information we actually log is greater than the size of the inode
110  * on disk. Hence we need an inode reservation function that calculates all this
111  * correctly. So, we log:
112  *
113  * - 4 log op headers for object
114  *	- for the ilf, the inode core and 2 forks
115  * - inode log format object
116  * - the inode core
117  * - two inode forks containing bmap btree root blocks.
118  *	- the btree data contained by both forks will fit into the inode size,
119  *	  hence when combined with the inode core above, we have a total of the
120  *	  actual inode size.
121  *	- the BMBT headers need to be accounted separately, as they are
122  *	  additional to the records and pointers that fit inside the inode
123  *	  forks.
124  */
125 STATIC uint
126 xfs_calc_inode_res(
127 	struct xfs_mount	*mp,
128 	uint			ninodes)
129 {
130 	return ninodes *
131 		(4 * sizeof(struct xlog_op_header) +
132 		 sizeof(struct xfs_inode_log_format) +
133 		 mp->m_sb.sb_inodesize +
134 		 2 * XFS_BMBT_BLOCK_LEN(mp));
135 }
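
/*
 * Rough worked example (the structure sizes are illustrative assumptions):
 * with a 12-byte xlog_op_header, a 56-byte xfs_inode_log_format, 512-byte
 * on-disk inodes and a 72-byte bmbt block header on a v5 filesystem, logging
 * one inode reserves about:
 *
 *	4 * 12 + 56 + 512 + 2 * 72 = 760 bytes
 */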
136 
137 /*
138  * Inode btree record insertion/removal modifies the inode btree and free space
139  * btrees (since the inobt does not use the agfl). This requires the following
140  * reservation:
141  *
142  * the inode btree: max depth * blocksize
143  * the allocation btrees: 2 trees * (2 * max depth - 1) * block size
144  *
145  * The caller must account for SB and AG header modifications, etc.
146  */
147 STATIC uint
148 xfs_calc_inobt_res(
149 	struct xfs_mount	*mp)
150 {
151 	return xfs_calc_buf_res(M_IGEO(mp)->inobt_maxlevels,
152 			XFS_FSB_TO_B(mp, 1)) +
153 				xfs_calc_buf_res(xfs_allocfree_block_count(mp, 1),
154 			XFS_FSB_TO_B(mp, 1));
155 }
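
/*
 * For illustration only (assumed geometry): with inobt_maxlevels = 3,
 * m_alloc_maxlevels = 5, no rmapbt and 4096-byte blocks this works out to
 * roughly:
 *
 *	3 * (4096 + 128) + 2 * (2 * 5 - 1) * (4096 + 128)
 *		= 12672 + 76032 = 88704 bytes
 */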
156 
157 /*
158  * The free inode btree is a conditional feature. The behavior differs slightly
159  * from that of the traditional inode btree in that the finobt tracks records
160  * for inode chunks with at least one free inode. A record can be removed from
161  * the tree during individual inode allocation. Therefore the finobt
162  * reservation is included unconditionally for both the inode chunk allocation
163  * and individual inode allocation (modify) cases.
164  *
165  * Behavior aside, the reservation for finobt modification is equivalent to the
166  * traditional inobt: cover a full finobt shape change plus block allocation.
167  */
168 STATIC uint
169 xfs_calc_finobt_res(
170 	struct xfs_mount	*mp)
171 {
172 	if (!xfs_has_finobt(mp))
173 		return 0;
174 
175 	return xfs_calc_inobt_res(mp);
176 }
177 
178 /*
179  * Calculate the reservation required to allocate or free an inode chunk. This
180  * includes:
181  *
182  * the allocation btrees: 2 trees * (2 * max depth - 1) * block size
183  * the inode chunk: m_ino_geo.ialloc_blks * N
184  *
185  * The size N of the inode chunk reservation depends on whether it is for
186  * allocation or free and which type of create transaction is in use. An inode
187  * chunk free always invalidates the buffers and only requires reservation for
188  * headers (N == 0). An inode chunk allocation requires a chunk sized
189  * reservation on v4 and older superblocks to initialize the chunk. No chunk
190  * reservation is required for allocation on v5 supers, which use ordered
191  * buffers to initialize.
192  */
193 STATIC uint
194 xfs_calc_inode_chunk_res(
195 	struct xfs_mount	*mp,
196 	bool			alloc)
197 {
198 	uint			res, size = 0;
199 
200 	res = xfs_calc_buf_res(xfs_allocfree_block_count(mp, 1),
201 			       XFS_FSB_TO_B(mp, 1));
202 	if (alloc) {
203 		/* icreate tx uses ordered buffers */
204 		if (xfs_has_v3inodes(mp))
205 			return res;
206 		size = XFS_FSB_TO_B(mp, 1);
207 	}
208 
209 	res += xfs_calc_buf_res(M_IGEO(mp)->ialloc_blks, size);
210 	return res;
211 }
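
/*
 * Example of the alloc/free asymmetry (the numbers are editor-supplied
 * assumptions): on a v5 filesystem the allocation case returns only the
 * allocfree reservation because the chunk is initialized with ordered
 * buffers.  On an older v4 filesystem with 4096-byte blocks and 512-byte
 * inodes, a 64-inode chunk spans 8 blocks, so the allocation case adds
 * 8 * (4096 + 128) bytes on top of the allocfree reservation.  Freeing a
 * chunk always passes size == 0, i.e. buffer headers only.
 */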
212 
213 /*
214  * Per-extent log reservation for the btree changes involved in freeing or
215  * allocating a realtime extent.  We have to be able to log as many rtbitmap
216  * blocks as needed to mark in use XFS_MAX_BMBT_EXTLEN blocks' worth of realtime
217  * extents, as well as the realtime summary block.
218  */
219 static unsigned int
220 xfs_rtalloc_block_count(
221 	struct xfs_mount	*mp,
222 	unsigned int		num_ops)
223 {
224 	unsigned int		rtbmp_blocks;
225 	xfs_rtxlen_t		rtxlen;
226 
227 	rtxlen = xfs_extlen_to_rtxlen(mp, XFS_MAX_BMBT_EXTLEN);
228 	rtbmp_blocks = xfs_rtbitmap_blockcount(mp, rtxlen);
229 	return (rtbmp_blocks + 1) * num_ops;
230 }
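
/*
 * Ballpark example (assumes 4096-byte blocks and a one-block realtime extent
 * size, both illustrative): XFS_MAX_BMBT_EXTLEN is just under 2^21 blocks,
 * i.e. roughly two million realtime extents.  At one bit per extent and
 * 32768 bits per bitmap block that is on the order of 64 rtbitmap blocks,
 * plus one summary block, per operation.
 */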
231 
232 /*
233  * Various log reservation values.
234  *
235  * These are based on the size of the file system block because that is what
236  * most transactions manipulate.  Each adds in an additional 128 bytes per
237  * item logged to try to account for the overhead of the transaction mechanism.
238  *
239  * Note:  Most of the reservations underestimate the number of allocation
240  * groups into which they could free extents in the xfs_defer_finish() call.
241  * This is because the number in the worst case is quite high and quite
242  * unusual.  In order to fix this we need to change xfs_defer_finish() to free
243  * extents in only a single AG at a time.  This will require changes to the
244  * EFI code as well, however, so that the EFI for the extents not freed is
245  * logged again in each transaction.  See SGI PV #261917.
246  *
247  * Reservation functions here avoid a huge stack in xfs_trans_init due to
248  * register overflow from temporaries in the calculations.
249  */
250 
251 /*
252  * Compute the log reservation required to handle the refcount update
253  * transaction.  Refcount updates are always done via deferred log items.
254  *
255  * This is calculated as:
256  * Data device refcount updates (t1):
257  *    the agfs of the ags containing the blocks: nr_ops * sector size
258  *    the refcount btrees: nr_ops * 1 trees * (2 * max depth - 1) * block size
259  */
260 static unsigned int
261 xfs_calc_refcountbt_reservation(
262 	struct xfs_mount	*mp,
263 	unsigned int		nr_ops)
264 {
265 	unsigned int		blksz = XFS_FSB_TO_B(mp, 1);
266 
267 	if (!xfs_has_reflink(mp))
268 		return 0;
269 
270 	return xfs_calc_buf_res(nr_ops, mp->m_sb.sb_sectsize) +
271 	       xfs_calc_buf_res(xfs_refcountbt_block_count(mp, nr_ops), blksz);
272 }
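
/*
 * Worked example (assumed geometry): with 512-byte sectors, 4096-byte blocks
 * and m_refc_maxlevels = 4, a single refcount update (nr_ops = 1) reserves
 *
 *	(512 + 128) + (2 * 4 - 1) * (4096 + 128) = 640 + 29568 = 30208 bytes
 *
 * and the whole reservation is 0 when the filesystem has no reflink support.
 */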
273 
274 /*
275  * In a write transaction we can allocate a maximum of 2
276  * extents.  This gives (t1):
277  *    the inode getting the new extents: inode size
278  *    the inode's bmap btree: max depth * block size
279  *    the agfs of the ags from which the extents are allocated: 2 * sector size
280  *    the superblock free block counter: sector size
281  *    the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
282  * Or, if we're writing to a realtime file (t2):
283  *    the inode getting the new extents: inode size
284  *    the inode's bmap btree: max depth * block size
285  *    the agfs of the ags from which the extents are allocated: 2 * sector size
286  *    the superblock free block counter: sector size
287  *    the realtime bitmap: ((XFS_MAX_BMBT_EXTLEN / rtextsize) / NBBY) bytes
288  *    the realtime summary: 1 block
289  *    the allocation btrees: 2 trees * (2 * max depth - 1) * block size
290  * And the bmap_finish transaction can free bmap blocks in a join (t3):
291  *    the agfs of the ags containing the blocks: 2 * sector size
292  *    the agfls of the ags containing the blocks: 2 * sector size
293  *    the super block free block counter: sector size
294  *    the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
295  * And any refcount updates that happen in a separate transaction (t4).
296  */
297 STATIC uint
298 xfs_calc_write_reservation(
299 	struct xfs_mount	*mp,
300 	bool			for_minlogsize)
301 {
302 	unsigned int		t1, t2, t3, t4;
303 	unsigned int		blksz = XFS_FSB_TO_B(mp, 1);
304 
305 	t1 = xfs_calc_inode_res(mp, 1) +
306 	     xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK), blksz) +
307 	     xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
308 	     xfs_calc_buf_res(xfs_allocfree_block_count(mp, 2), blksz);
309 
310 	if (xfs_has_realtime(mp)) {
311 		t2 = xfs_calc_inode_res(mp, 1) +
312 		     xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK),
313 				     blksz) +
314 		     xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
315 		     xfs_calc_buf_res(xfs_rtalloc_block_count(mp, 1), blksz) +
316 		     xfs_calc_buf_res(xfs_allocfree_block_count(mp, 1), blksz);
317 	} else {
318 		t2 = 0;
319 	}
320 
321 	t3 = xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
322 	     xfs_calc_buf_res(xfs_allocfree_block_count(mp, 2), blksz);
323 
324 	/*
325 	 * In the early days of reflink, we included enough reservation to log
326 	 * two refcountbt splits for each transaction.  The codebase runs
327 	 * refcountbt updates in separate transactions now, so to compute the
328 	 * minimum log size, add the refcountbtree splits back to t1 and t3 and
329 	 * do not account them separately as t4.  Reflink did not support
330 	 * realtime when the reservations were established, so no adjustment to
331 	 * t2 is needed.
332 	 */
333 	if (for_minlogsize) {
334 		unsigned int	adj = 0;
335 
336 		if (xfs_has_reflink(mp))
337 			adj = xfs_calc_buf_res(
338 					xfs_refcountbt_block_count(mp, 2),
339 					blksz);
340 		t1 += adj;
341 		t3 += adj;
342 		return XFS_DQUOT_LOGRES(mp) + max3(t1, t2, t3);
343 	}
344 
345 	t4 = xfs_calc_refcountbt_reservation(mp, 1);
346 	return XFS_DQUOT_LOGRES(mp) + max(t4, max3(t1, t2, t3));
347 }
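
/*
 * Minimal sketch (editor's addition) of how a caller typically consumes this
 * reservation; "resblks" is a caller-computed block count and is not defined
 * here:
 *
 *	struct xfs_trans	*tp;
 *	int			error;
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
 *	if (error)
 *		return error;
 *	...make the metadata changes...
 *	error = xfs_trans_commit(tp);
 */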
348 
349 unsigned int
350 xfs_calc_write_reservation_minlogsize(
351 	struct xfs_mount	*mp)
352 {
353 	return xfs_calc_write_reservation(mp, true);
354 }
355 
356 /*
357  * In truncating a file we free up to two extents at once.  We can modify (t1):
358  *    the inode being truncated: inode size
359  *    the inode's bmap btree: (max depth + 1) * block size
360  * And the bmap_finish transaction can free the blocks and bmap blocks (t2):
361  *    the agf for each of the ags: 4 * sector size
362  *    the agfl for each of the ags: 4 * sector size
363  *    the super block to reflect the freed blocks: sector size
364  *    worst case split in allocation btrees per extent assuming 4 extents:
365  *		4 exts * 2 trees * (2 * max depth - 1) * block size
366  * Or, if it's a realtime file (t3):
367  *    the agf for each of the ags: 2 * sector size
368  *    the agfl for each of the ags: 2 * sector size
369  *    the super block to reflect the freed blocks: sector size
370  *    the realtime bitmap:
371  *		2 exts * ((XFS_MAX_BMBT_EXTLEN / rtextsize) / NBBY) bytes
372  *    the realtime summary: 2 exts * 1 block
373  *    worst case split in allocation btrees per extent assuming 2 extents:
374  *		2 exts * 2 trees * (2 * max depth - 1) * block size
375  * And any refcount updates that happen in a separate transaction (t4).
376  */
377 STATIC uint
378 xfs_calc_itruncate_reservation(
379 	struct xfs_mount	*mp,
380 	bool			for_minlogsize)
381 {
382 	unsigned int		t1, t2, t3, t4;
383 	unsigned int		blksz = XFS_FSB_TO_B(mp, 1);
384 
385 	t1 = xfs_calc_inode_res(mp, 1) +
386 	     xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1, blksz);
387 
388 	t2 = xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) +
389 	     xfs_calc_buf_res(xfs_allocfree_block_count(mp, 4), blksz);
390 
391 	if (xfs_has_realtime(mp)) {
392 		t3 = xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
393 		     xfs_calc_buf_res(xfs_rtalloc_block_count(mp, 2), blksz) +
394 		     xfs_calc_buf_res(xfs_allocfree_block_count(mp, 2), blksz);
395 	} else {
396 		t3 = 0;
397 	}
398 
399 	/*
400 	 * In the early days of reflink, we included enough reservation to log
401 	 * four refcountbt splits in the same transaction as bnobt/cntbt
402 	 * updates.  The codebase runs refcountbt updates in separate
403 	 * transactions now, so to compute the minimum log size, add the
404 	 * refcount btree splits back here and do not compute them separately
405 	 * as t4.  Reflink did not support realtime when the reservations were
406 	 * established, so do not adjust t3.
407 	 */
408 	if (for_minlogsize) {
409 		if (xfs_has_reflink(mp))
410 			t2 += xfs_calc_buf_res(
411 					xfs_refcountbt_block_count(mp, 4),
412 					blksz);
413 
414 		return XFS_DQUOT_LOGRES(mp) + max3(t1, t2, t3);
415 	}
416 
417 	t4 = xfs_calc_refcountbt_reservation(mp, 2);
418 	return XFS_DQUOT_LOGRES(mp) + max(t4, max3(t1, t2, t3));
419 }
420 
421 unsigned int
422 xfs_calc_itruncate_reservation_minlogsize(
423 	struct xfs_mount	*mp)
424 {
425 	return xfs_calc_itruncate_reservation(mp, true);
426 }
427 
428 static inline unsigned int xfs_calc_pptr_link_overhead(void)
429 {
430 	return sizeof(struct xfs_attri_log_format) +
431 			xlog_calc_iovec_len(sizeof(struct xfs_parent_rec)) +
432 			xlog_calc_iovec_len(MAXNAMELEN - 1);
433 }
434 static inline unsigned int xfs_calc_pptr_unlink_overhead(void)
435 {
436 	return sizeof(struct xfs_attri_log_format) +
437 			xlog_calc_iovec_len(sizeof(struct xfs_parent_rec)) +
438 			xlog_calc_iovec_len(MAXNAMELEN - 1);
439 }
440 static inline unsigned int xfs_calc_pptr_replace_overhead(void)
441 {
442 	return sizeof(struct xfs_attri_log_format) +
443 			xlog_calc_iovec_len(sizeof(struct xfs_parent_rec)) +
444 			xlog_calc_iovec_len(MAXNAMELEN - 1) +
445 			xlog_calc_iovec_len(sizeof(struct xfs_parent_rec)) +
446 			xlog_calc_iovec_len(MAXNAMELEN - 1);
447 }
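
/*
 * Sizing note (illustrative; the structure sizes are assumptions): if the
 * attri log format header is ~40 bytes, a parent record is 12 bytes and
 * MAXNAMELEN - 1 = 255 rounds up to 256 in xlog_calc_iovec_len(), then a
 * single link or unlink adds roughly 40 + 12 + 256 = 308 bytes of overhead
 * per transaction, and a replace roughly 40 + 2 * (12 + 256) = 576 bytes.
 */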
448 
449 /*
450  * In renaming files we can modify (t1):
451  *    the five inodes involved: 5 * inode size
452  *    the two directory btrees: 2 * (max depth + v2) * dir block size
453  *    the two directory bmap btrees: 2 * max depth * block size
454  * And the bmap_finish transaction can free dir and bmap blocks (two sets
455  *	of bmap blocks) giving (t2):
456  *    the agf for the ags in which the blocks live: 3 * sector size
457  *    the agfl for the ags in which the blocks live: 3 * sector size
458  *    the superblock for the free block count: sector size
459  *    the allocation btrees: 3 exts * 2 trees * (2 * max depth - 1) * block size
460  * If parent pointers are enabled (t3), then each transaction in the chain
461  *    must be capable of setting or removing the extended attribute
462  *    containing the parent information.  It must also be able to handle
463  *    the three xattr intent items that track the progress of the parent
464  *    pointer update.
465  */
466 STATIC uint
467 xfs_calc_rename_reservation(
468 	struct xfs_mount	*mp)
469 {
470 	unsigned int		overhead = XFS_DQUOT_LOGRES(mp);
471 	struct xfs_trans_resv	*resp = M_RES(mp);
472 	unsigned int		t1, t2, t3 = 0;
473 
474 	t1 = xfs_calc_inode_res(mp, 5) +
475 	     xfs_calc_buf_res(2 * XFS_DIROP_LOG_COUNT(mp),
476 			XFS_FSB_TO_B(mp, 1));
477 
478 	t2 = xfs_calc_buf_res(7, mp->m_sb.sb_sectsize) +
479 	     xfs_calc_buf_res(xfs_allocfree_block_count(mp, 3),
480 			XFS_FSB_TO_B(mp, 1));
481 
482 	if (xfs_has_parent(mp)) {
483 		unsigned int	rename_overhead, exchange_overhead;
484 
485 		t3 = max(resp->tr_attrsetm.tr_logres,
486 			 resp->tr_attrrm.tr_logres);
487 
488 		/*
489 		 * For a standard rename, the three xattr intent log items
490 		 * are (1) replacing the pptr for the source file; (2)
491 		 * removing the pptr on the dest file; and (3) adding a
492 		 * pptr for the whiteout file in the src dir.
493 		 *
494 		 * For a RENAME_EXCHANGE, there are two xattr intent
495 		 * items to replace the pptr for both src and dest
496 		 * files.  Link counts don't change and there is no
497 		 * whiteout.
498 		 *
499 		 * In the worst case we can end up relogging all log
500 		 * intent items to allow the log tail to move ahead, so
501 		 * they become overhead added to each transaction in a
502 		 * processing chain.
503 		 */
504 		rename_overhead = xfs_calc_pptr_replace_overhead() +
505 				  xfs_calc_pptr_unlink_overhead() +
506 				  xfs_calc_pptr_link_overhead();
507 		exchange_overhead = 2 * xfs_calc_pptr_replace_overhead();
508 
509 		overhead += max(rename_overhead, exchange_overhead);
510 	}
511 
512 	return overhead + max3(t1, t2, t3);
513 }
514 
515 static inline unsigned int
516 xfs_rename_log_count(
517 	struct xfs_mount	*mp,
518 	struct xfs_trans_resv	*resp)
519 {
520 	/* One for the rename, one more for freeing blocks */
521 	unsigned int		ret = XFS_RENAME_LOG_COUNT;
522 
523 	/*
524 	 * Pre-reserve enough log reservation to handle the transaction
525 	 * rolling needed to remove or add one parent pointer.
526 	 */
527 	if (xfs_has_parent(mp))
528 		ret += max(resp->tr_attrsetm.tr_logcount,
529 			   resp->tr_attrrm.tr_logcount);
530 
531 	return ret;
532 }
533 
534 /*
535  * For removing an inode from the unlinked list we can modify:
536  *    the agi hash list and counters: sector size
537  *    the on disk inode before ours in the agi hash list: inode cluster size
538  *    the on disk inode in the agi hash list: inode cluster size
539  */
540 STATIC uint
541 xfs_calc_iunlink_remove_reservation(
542 	struct xfs_mount        *mp)
543 {
544 	return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
545 	       2 * M_IGEO(mp)->inode_cluster_size;
546 }
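
/*
 * For example (assumed geometry): with 512-byte sectors and an 8192-byte
 * inode cluster this is (512 + 128) + 2 * 8192 = 17024 bytes.  Note that the
 * cluster buffers are counted at full size without going through
 * xfs_calc_buf_res(), so no extra 128-byte overhead is added for them.
 */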
547 
548 static inline unsigned int
549 xfs_link_log_count(
550 	struct xfs_mount	*mp,
551 	struct xfs_trans_resv	*resp)
552 {
553 	unsigned int		ret = XFS_LINK_LOG_COUNT;
554 
555 	/*
556 	 * Pre-reserve enough log reservation to handle the transaction
557 	 * rolling needed to add one parent pointer.
558 	 */
559 	if (xfs_has_parent(mp))
560 		ret += resp->tr_attrsetm.tr_logcount;
561 
562 	return ret;
563 }
564 
565 /*
566  * For creating a link to an inode:
567  *    the parent directory inode: inode size
568  *    the linked inode: inode size
569  *    the directory btree could split: (max depth + v2) * dir block size
570  *    the directory bmap btree could join or split: (max depth + v2) * blocksize
571  * And the bmap_finish transaction can free some bmap blocks giving:
572  *    the agf for the ag in which the blocks live: sector size
573  *    the agfl for the ag in which the blocks live: sector size
574  *    the superblock for the free block count: sector size
575  *    the allocation btrees: 2 trees * (2 * max depth - 1) * block size
576  */
577 STATIC uint
578 xfs_calc_link_reservation(
579 	struct xfs_mount	*mp)
580 {
581 	unsigned int		overhead = XFS_DQUOT_LOGRES(mp);
582 	struct xfs_trans_resv	*resp = M_RES(mp);
583 	unsigned int		t1, t2, t3 = 0;
584 
585 	overhead += xfs_calc_iunlink_remove_reservation(mp);
586 	t1 = xfs_calc_inode_res(mp, 2) +
587 	     xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp), XFS_FSB_TO_B(mp, 1));
588 	t2 = xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
589 	     xfs_calc_buf_res(xfs_allocfree_block_count(mp, 1),
590 			      XFS_FSB_TO_B(mp, 1));
591 
592 	if (xfs_has_parent(mp)) {
593 		t3 = resp->tr_attrsetm.tr_logres;
594 		overhead += xfs_calc_pptr_link_overhead();
595 	}
596 
597 	return overhead + max3(t1, t2, t3);
598 }
599 
600 /*
601  * For adding an inode to the unlinked list we can modify:
602  *    the agi hash list: sector size
603  *    the on disk inode: inode cluster size
604  */
605 STATIC uint
606 xfs_calc_iunlink_add_reservation(struct xfs_mount *mp)
607 {
608 	return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
609 			M_IGEO(mp)->inode_cluster_size;
610 }
611 
612 static inline unsigned int
613 xfs_remove_log_count(
614 	struct xfs_mount	*mp,
615 	struct xfs_trans_resv	*resp)
616 {
617 	unsigned int		ret = XFS_REMOVE_LOG_COUNT;
618 
619 	/*
620 	 * Pre-reserve enough log reservation to handle the transaction
621 	 * rolling needed to add one parent pointer.
622 	 */
623 	if (xfs_has_parent(mp))
624 		ret += resp->tr_attrrm.tr_logcount;
625 
626 	return ret;
627 }
628 
629 /*
630  * For removing a directory entry we can modify:
631  *    the parent directory inode: inode size
632  *    the removed inode: inode size
633  *    the directory btree could join: (max depth + v2) * dir block size
634  *    the directory bmap btree could join or split: (max depth + v2) * blocksize
635  * And the bmap_finish transaction can free the dir and bmap blocks giving:
636  *    the agf for the ag in which the blocks live: 2 * sector size
637  *    the agfl for the ag in which the blocks live: 2 * sector size
638  *    the superblock for the free block count: sector size
639  *    the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
640  */
641 STATIC uint
642 xfs_calc_remove_reservation(
643 	struct xfs_mount	*mp)
644 {
645 	unsigned int            overhead = XFS_DQUOT_LOGRES(mp);
646 	struct xfs_trans_resv   *resp = M_RES(mp);
647 	unsigned int            t1, t2, t3 = 0;
648 
649 	overhead += xfs_calc_iunlink_add_reservation(mp);
650 
651 	t1 = xfs_calc_inode_res(mp, 2) +
652 	     xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp), XFS_FSB_TO_B(mp, 1));
653 	t2 = xfs_calc_buf_res(4, mp->m_sb.sb_sectsize) +
654 	     xfs_calc_buf_res(xfs_allocfree_block_count(mp, 2),
655 			      XFS_FSB_TO_B(mp, 1));
656 
657 	if (xfs_has_parent(mp)) {
658 		t3 = resp->tr_attrrm.tr_logres;
659 		overhead += xfs_calc_pptr_unlink_overhead();
660 	}
661 
662 	return overhead + max3(t1, t2, t3);
663 }
664 
665 /*
666  * For create, break it into the two cases that the transaction
667  * covers: the modify case - allocation done by modifying the state of
668  * existing inodes - and the allocation case.
669  */
670 
671 /*
672  * For create we can modify:
673  *    the parent directory inode: inode size
674  *    the new inode: inode size
675  *    the inode btree entry: block size
676  *    the superblock for the nlink flag: sector size
677  *    the directory btree: (max depth + v2) * dir block size
678  *    the directory inode's bmap btree: (max depth + v2) * block size
679  *    the finobt (record modification and allocation btrees)
680  */
681 STATIC uint
682 xfs_calc_create_resv_modify(
683 	struct xfs_mount	*mp)
684 {
685 	return xfs_calc_inode_res(mp, 2) +
686 		xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
687 		(uint)XFS_FSB_TO_B(mp, 1) +
688 		xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp), XFS_FSB_TO_B(mp, 1)) +
689 		xfs_calc_finobt_res(mp);
690 }
691 
692 /*
693  * For icreate we can allocate some inodes giving:
694  *    the agi and agf of the ag getting the new inodes: 2 * sectorsize
695  *    the superblock for the nlink flag: sector size
696  *    the inode chunk (allocation, optional init)
697  *    the inobt (record insertion)
698  *    the finobt (optional, record insertion)
699  */
700 STATIC uint
701 xfs_calc_icreate_resv_alloc(
702 	struct xfs_mount	*mp)
703 {
704 	return xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
705 		mp->m_sb.sb_sectsize +
706 		xfs_calc_inode_chunk_res(mp, _ALLOC) +
707 		xfs_calc_inobt_res(mp) +
708 		xfs_calc_finobt_res(mp);
709 }
710 
711 static inline unsigned int
712 xfs_icreate_log_count(
713 	struct xfs_mount	*mp,
714 	struct xfs_trans_resv	*resp)
715 {
716 	unsigned int		ret = XFS_CREATE_LOG_COUNT;
717 
718 	/*
719 	 * Pre-reserve enough log reservation to handle the transaction
720 	 * rolling needed to add one parent pointer.
721 	 */
722 	if (xfs_has_parent(mp))
723 		ret += resp->tr_attrsetm.tr_logcount;
724 
725 	return ret;
726 }
727 
728 STATIC uint
729 xfs_calc_icreate_reservation(
730 	struct xfs_mount	*mp)
731 {
732 	struct xfs_trans_resv	*resp = M_RES(mp);
733 	unsigned int		overhead = XFS_DQUOT_LOGRES(mp);
734 	unsigned int		t1, t2, t3 = 0;
735 
736 	t1 = xfs_calc_icreate_resv_alloc(mp);
737 	t2 = xfs_calc_create_resv_modify(mp);
738 
739 	if (xfs_has_parent(mp)) {
740 		t3 = resp->tr_attrsetm.tr_logres;
741 		overhead += xfs_calc_pptr_link_overhead();
742 	}
743 
744 	return overhead + max3(t1, t2, t3);
745 }
746 
747 STATIC uint
748 xfs_calc_create_tmpfile_reservation(
749 	struct xfs_mount        *mp)
750 {
751 	uint	res = XFS_DQUOT_LOGRES(mp);
752 
753 	res += xfs_calc_icreate_resv_alloc(mp);
754 	return res + xfs_calc_iunlink_add_reservation(mp);
755 }
756 
757 static inline unsigned int
758 xfs_mkdir_log_count(
759 	struct xfs_mount	*mp,
760 	struct xfs_trans_resv	*resp)
761 {
762 	unsigned int		ret = XFS_MKDIR_LOG_COUNT;
763 
764 	/*
765 	 * Pre-reserve enough log reservation to handle the transaction
766 	 * rolling needed to add one parent pointer.
767 	 */
768 	if (xfs_has_parent(mp))
769 		ret += resp->tr_attrsetm.tr_logcount;
770 
771 	return ret;
772 }
773 
774 /*
775  * Making a new directory is the same as creating a new file.
776  */
777 STATIC uint
778 xfs_calc_mkdir_reservation(
779 	struct xfs_mount	*mp)
780 {
781 	return xfs_calc_icreate_reservation(mp);
782 }
783 
784 static inline unsigned int
785 xfs_symlink_log_count(
786 	struct xfs_mount	*mp,
787 	struct xfs_trans_resv	*resp)
788 {
789 	unsigned int		ret = XFS_SYMLINK_LOG_COUNT;
790 
791 	/*
792 	 * Pre-reserve enough log reservation to handle the transaction
793 	 * rolling needed to add one parent pointer.
794 	 */
795 	if (xfs_has_parent(mp))
796 		ret += resp->tr_attrsetm.tr_logcount;
797 
798 	return ret;
799 }
800 
801 /*
802  * Making a new symlink is the same as creating a new file, but
803  * with the added blocks for remote symlink data which can be up to 1kB in
804  * length (XFS_SYMLINK_MAXLEN).
805  */
806 STATIC uint
807 xfs_calc_symlink_reservation(
808 	struct xfs_mount	*mp)
809 {
810 	return xfs_calc_icreate_reservation(mp) +
811 	       xfs_calc_buf_res(1, XFS_SYMLINK_MAXLEN);
812 }
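
/*
 * Worked example (editor's illustration): the remote symlink target adds
 * xfs_calc_buf_res(1, XFS_SYMLINK_MAXLEN) = 1024 + 128 = 1152 bytes (with
 * the usual 128-byte buffer overhead) on top of the icreate reservation,
 * regardless of the actual target length.
 */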
813 
814 /*
815  * In freeing an inode we can modify:
816  *    the inode being freed: inode size
817  *    the super block free inode counter, AGF and AGFL: sector size
818  *    the on disk inode (agi unlinked list removal)
819  *    the inode chunk (invalidated, headers only)
820  *    the inode btree
821  *    the finobt (record insertion, removal or modification)
822  *
823  * Note that the inode chunk reservation includes an allocfree reservation for
824  * freeing of the inode chunk. This is technically extraneous because the inode
825  * chunk free is deferred (it occurs after a transaction roll). Include the
826  * extra reservation anyway since we've had reports of ifree transaction
827  * overruns due to too many agfl fixups during inode chunk frees.
828  */
829 STATIC uint
830 xfs_calc_ifree_reservation(
831 	struct xfs_mount	*mp)
832 {
833 	return XFS_DQUOT_LOGRES(mp) +
834 		xfs_calc_inode_res(mp, 1) +
835 		xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
836 		xfs_calc_iunlink_remove_reservation(mp) +
837 		xfs_calc_inode_chunk_res(mp, _FREE) +
838 		xfs_calc_inobt_res(mp) +
839 		xfs_calc_finobt_res(mp);
840 }
841 
842 /*
843  * When only changing the inode we log the inode and possibly the superblock.
844  * We also add a bit of slop for the transaction stuff.
845  */
846 STATIC uint
847 xfs_calc_ichange_reservation(
848 	struct xfs_mount	*mp)
849 {
850 	return XFS_DQUOT_LOGRES(mp) +
851 		xfs_calc_inode_res(mp, 1) +
852 		xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
853 
854 }
855 
856 /*
857  * Growing the data section of the filesystem.
858  *	superblock
859  *	agi and agf
860  *	allocation btrees
861  */
862 STATIC uint
863 xfs_calc_growdata_reservation(
864 	struct xfs_mount	*mp)
865 {
866 	return xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
867 		xfs_calc_buf_res(xfs_allocfree_block_count(mp, 1),
868 				 XFS_FSB_TO_B(mp, 1));
869 }
870 
871 /*
872  * Growing the rt section of the filesystem.
873  * In the first set of transactions (ALLOC) we allocate space to the
874  * bitmap or summary files.
875  *	superblock: sector size
876  *	agf of the ag from which the extent is allocated: sector size
877  *	bmap btree for bitmap/summary inode: max depth * blocksize
878  *	bitmap/summary inode: inode size
879  *	allocation btrees for 1 block alloc: 2 * (2 * maxdepth - 1) * blocksize
880  */
881 STATIC uint
882 xfs_calc_growrtalloc_reservation(
883 	struct xfs_mount	*mp)
884 {
885 	return xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
886 		xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK),
887 				 XFS_FSB_TO_B(mp, 1)) +
888 		xfs_calc_inode_res(mp, 1) +
889 		xfs_calc_buf_res(xfs_allocfree_block_count(mp, 1),
890 				 XFS_FSB_TO_B(mp, 1));
891 }
892 
893 /*
894  * Growing the rt section of the filesystem.
895  * In the second set of transactions (ZERO) we zero the new metadata blocks.
896  *	one bitmap/summary block: blocksize
897  */
898 STATIC uint
899 xfs_calc_growrtzero_reservation(
900 	struct xfs_mount	*mp)
901 {
902 	return xfs_calc_buf_res(1, mp->m_sb.sb_blocksize);
903 }
904 
905 /*
906  * Growing the rt section of the filesystem.
907  * In the third set of transactions (FREE) we update metadata without
908  * allocating any new blocks.
909  *	superblock: sector size
910  *	bitmap inode: inode size
911  *	summary inode: inode size
912  *	one bitmap block: blocksize
913  *	summary blocks: new summary size
914  */
915 STATIC uint
916 xfs_calc_growrtfree_reservation(
917 	struct xfs_mount	*mp)
918 {
919 	return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
920 		xfs_calc_inode_res(mp, 2) +
921 		xfs_calc_buf_res(1, mp->m_sb.sb_blocksize) +
922 		xfs_calc_buf_res(1, mp->m_rsumsize);
923 }
924 
925 /*
926  * Logging the inode modification timestamp on a synchronous write.
927  *	inode
928  */
929 STATIC uint
930 xfs_calc_swrite_reservation(
931 	struct xfs_mount	*mp)
932 {
933 	return xfs_calc_inode_res(mp, 1);
934 }
935 
936 /*
937  * Logging the inode mode bits when writing a setuid/setgid file
938  *	inode
939  */
940 STATIC uint
941 xfs_calc_writeid_reservation(
942 	struct xfs_mount	*mp)
943 {
944 	return xfs_calc_inode_res(mp, 1);
945 }
946 
947 /*
948  * Converting the inode from non-attributed to attributed.
949  *	the inode being converted: inode size
950  *	agf block and superblock (for block allocation)
951  *	the new block (directory sized)
952  *	bmap blocks for the new directory block
953  *	allocation btrees
954  */
955 STATIC uint
956 xfs_calc_addafork_reservation(
957 	struct xfs_mount	*mp)
958 {
959 	return XFS_DQUOT_LOGRES(mp) +
960 		xfs_calc_inode_res(mp, 1) +
961 		xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
962 		xfs_calc_buf_res(1, mp->m_dir_geo->blksize) +
963 		xfs_calc_buf_res(XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK) + 1,
964 				 XFS_FSB_TO_B(mp, 1)) +
965 		xfs_calc_buf_res(xfs_allocfree_block_count(mp, 1),
966 				 XFS_FSB_TO_B(mp, 1));
967 }
968 
969 /*
970  * Removing the attribute fork of a file
971  *    the inode being truncated: inode size
972  *    the inode's bmap btree: max depth * block size
973  * And the bmap_finish transaction can free the blocks and bmap blocks:
974  *    the agf for each of the ags: 4 * sector size
975  *    the agfl for each of the ags: 4 * sector size
976  *    the super block to reflect the freed blocks: sector size
977  *    worst case split in allocation btrees per extent assuming 4 extents:
978  *		4 exts * 2 trees * (2 * max depth - 1) * block size
979  */
980 STATIC uint
981 xfs_calc_attrinval_reservation(
982 	struct xfs_mount	*mp)
983 {
984 	return max((xfs_calc_inode_res(mp, 1) +
985 		    xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK),
986 				     XFS_FSB_TO_B(mp, 1))),
987 		   (xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) +
988 		    xfs_calc_buf_res(xfs_allocfree_block_count(mp, 4),
989 				     XFS_FSB_TO_B(mp, 1))));
990 }
991 
992 /*
993  * Setting an attribute at mount time.
994  *	the inode getting the attribute
995  *	the superblock for allocations
996  *	the agfs that extents are allocated from
997  *	the attribute btree * max depth
998  *	the inode allocation btree
999  * Since attribute transaction space is dependent on the size of the attribute,
1000  * the calculation is done partially at mount time and partially at runtime
1001  * (see below).
1002  */
1003 STATIC uint
1004 xfs_calc_attrsetm_reservation(
1005 	struct xfs_mount	*mp)
1006 {
1007 	return XFS_DQUOT_LOGRES(mp) +
1008 		xfs_calc_inode_res(mp, 1) +
1009 		xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
1010 		xfs_calc_buf_res(XFS_DA_NODE_MAXDEPTH, XFS_FSB_TO_B(mp, 1));
1011 }
1012 
1013 /*
1014  * Setting an attribute at runtime, transaction space unit per block.
1015  * 	the superblock for allocations: sector size
1016  *	the inode bmap btree could join or split: max depth * block size
1017  * Since the runtime attribute transaction space is dependent on the total
1018  * blocks needed for the 1st bmap, here we calculate the space unit for
1019  * one block so that the caller can figure out the total space according
1020  * to the attribute extent length in blocks by:
1021  *	ext * M_RES(mp)->tr_attrsetrt.tr_logres
1022  */
1023 STATIC uint
1024 xfs_calc_attrsetrt_reservation(
1025 	struct xfs_mount	*mp)
1026 {
1027 	return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
1028 		xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK),
1029 				 XFS_FSB_TO_B(mp, 1));
1030 }
1031 
1032 /*
1033  * Removing an attribute.
1034  *    the inode: inode size
1035  *    the attribute btree could join: max depth * block size
1036  *    the inode bmap btree could join or split: max depth * block size
1037  * And the bmap_finish transaction can free the attr blocks freed giving:
1038  *    the agf for the ag in which the blocks live: 2 * sector size
1039  *    the agfl for the ag in which the blocks live: 2 * sector size
1040  *    the superblock for the free block count: sector size
1041  *    the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
1042  */
1043 STATIC uint
1044 xfs_calc_attrrm_reservation(
1045 	struct xfs_mount	*mp)
1046 {
1047 	return XFS_DQUOT_LOGRES(mp) +
1048 		max((xfs_calc_inode_res(mp, 1) +
1049 		     xfs_calc_buf_res(XFS_DA_NODE_MAXDEPTH,
1050 				      XFS_FSB_TO_B(mp, 1)) +
1051 		     (uint)XFS_FSB_TO_B(mp,
1052 					XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)) +
1053 		     xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK), 0)),
1054 		    (xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
1055 		     xfs_calc_buf_res(xfs_allocfree_block_count(mp, 2),
1056 				      XFS_FSB_TO_B(mp, 1))));
1057 }
1058 
1059 /*
1060  * Clearing a bad agino number in an agi hash bucket.
1061  */
1062 STATIC uint
1063 xfs_calc_clear_agi_bucket_reservation(
1064 	struct xfs_mount	*mp)
1065 {
1066 	return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
1067 }
1068 
1069 /*
1070  * Adjusting quota limits.
1071  *    the disk quota buffer: sizeof(struct xfs_disk_dquot)
1072  */
1073 STATIC uint
1074 xfs_calc_qm_setqlim_reservation(void)
1075 {
1076 	return xfs_calc_buf_res(1, sizeof(struct xfs_disk_dquot));
1077 }
1078 
1079 /*
1080  * Allocating quota on disk if needed.
1081  *	the write transaction log space for quota file extent allocation
1082  *	the unit of quota allocation: one system block size
1083  */
1084 STATIC uint
1085 xfs_calc_qm_dqalloc_reservation(
1086 	struct xfs_mount	*mp,
1087 	bool			for_minlogsize)
1088 {
1089 	return xfs_calc_write_reservation(mp, for_minlogsize) +
1090 		xfs_calc_buf_res(1,
1091 			XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) - 1);
1092 }
1093 
1094 unsigned int
1095 xfs_calc_qm_dqalloc_reservation_minlogsize(
1096 	struct xfs_mount	*mp)
1097 {
1098 	return xfs_calc_qm_dqalloc_reservation(mp, true);
1099 }
1100 
1101 /*
1102  * Syncing the incore super block changes to disk.
1103  *     the super block to reflect the changes: sector size
1104  */
1105 STATIC uint
1106 xfs_calc_sb_reservation(
1107 	struct xfs_mount	*mp)
1108 {
1109 	return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
1110 }
1111 
1112 /*
1113  * Namespace reservations.
1114  *
1115  * These get tricky when parent pointers are enabled as we have attribute
1116  * modifications occurring from within these transactions. Rather than confuse
1117  * each of these reservation calculations with the conditional attribute
1118  * reservations, add them here in a clear and concise manner. This requires that
1119  * the attribute reservations have already been calculated.
1120  *
1121  * Note that we only include the static attribute reservation here; the runtime
1122  * reservation will have to be modified by the size of the attributes being
1123  * added/removed/modified. See the comments on the attribute reservation
1124  * calculations for more details.
1125  */
1126 STATIC void
1127 xfs_calc_namespace_reservations(
1128 	struct xfs_mount	*mp,
1129 	struct xfs_trans_resv	*resp)
1130 {
1131 	ASSERT(resp->tr_attrsetm.tr_logres > 0);
1132 
1133 	resp->tr_rename.tr_logres = xfs_calc_rename_reservation(mp);
1134 	resp->tr_rename.tr_logcount = xfs_rename_log_count(mp, resp);
1135 	resp->tr_rename.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
1136 
1137 	resp->tr_link.tr_logres = xfs_calc_link_reservation(mp);
1138 	resp->tr_link.tr_logcount = xfs_link_log_count(mp, resp);
1139 	resp->tr_link.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
1140 
1141 	resp->tr_remove.tr_logres = xfs_calc_remove_reservation(mp);
1142 	resp->tr_remove.tr_logcount = xfs_remove_log_count(mp, resp);
1143 	resp->tr_remove.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
1144 
1145 	resp->tr_symlink.tr_logres = xfs_calc_symlink_reservation(mp);
1146 	resp->tr_symlink.tr_logcount = xfs_symlink_log_count(mp, resp);
1147 	resp->tr_symlink.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
1148 
1149 	resp->tr_create.tr_logres = xfs_calc_icreate_reservation(mp);
1150 	resp->tr_create.tr_logcount = xfs_icreate_log_count(mp, resp);
1151 	resp->tr_create.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
1152 
1153 	resp->tr_mkdir.tr_logres = xfs_calc_mkdir_reservation(mp);
1154 	resp->tr_mkdir.tr_logcount = xfs_mkdir_log_count(mp, resp);
1155 	resp->tr_mkdir.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
1156 }
1157 
1158 void
1159 xfs_trans_resv_calc(
1160 	struct xfs_mount	*mp,
1161 	struct xfs_trans_resv	*resp)
1162 {
1163 	int			logcount_adj = 0;
1164 
1165 	/*
1166 	 * The following transactions are logged in physical format and
1167 	 * require a permanent reservation on space.
1168 	 */
1169 	resp->tr_write.tr_logres = xfs_calc_write_reservation(mp, false);
1170 	resp->tr_write.tr_logcount = XFS_WRITE_LOG_COUNT;
1171 	resp->tr_write.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
1172 
1173 	resp->tr_itruncate.tr_logres = xfs_calc_itruncate_reservation(mp, false);
1174 	resp->tr_itruncate.tr_logcount = XFS_ITRUNCATE_LOG_COUNT;
1175 	resp->tr_itruncate.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
1176 
1177 	resp->tr_create_tmpfile.tr_logres =
1178 			xfs_calc_create_tmpfile_reservation(mp);
1179 	resp->tr_create_tmpfile.tr_logcount = XFS_CREATE_TMPFILE_LOG_COUNT;
1180 	resp->tr_create_tmpfile.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
1181 
1182 	resp->tr_ifree.tr_logres = xfs_calc_ifree_reservation(mp);
1183 	resp->tr_ifree.tr_logcount = XFS_INACTIVE_LOG_COUNT;
1184 	resp->tr_ifree.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
1185 
1186 	resp->tr_addafork.tr_logres = xfs_calc_addafork_reservation(mp);
1187 	resp->tr_addafork.tr_logcount = XFS_ADDAFORK_LOG_COUNT;
1188 	resp->tr_addafork.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
1189 
1190 	resp->tr_attrinval.tr_logres = xfs_calc_attrinval_reservation(mp);
1191 	resp->tr_attrinval.tr_logcount = XFS_ATTRINVAL_LOG_COUNT;
1192 	resp->tr_attrinval.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
1193 
1194 	resp->tr_attrsetm.tr_logres = xfs_calc_attrsetm_reservation(mp);
1195 	resp->tr_attrsetm.tr_logcount = XFS_ATTRSET_LOG_COUNT;
1196 	resp->tr_attrsetm.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
1197 
1198 	resp->tr_attrrm.tr_logres = xfs_calc_attrrm_reservation(mp);
1199 	resp->tr_attrrm.tr_logcount = XFS_ATTRRM_LOG_COUNT;
1200 	resp->tr_attrrm.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
1201 
1202 	resp->tr_growrtalloc.tr_logres = xfs_calc_growrtalloc_reservation(mp);
1203 	resp->tr_growrtalloc.tr_logcount = XFS_DEFAULT_PERM_LOG_COUNT;
1204 	resp->tr_growrtalloc.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
1205 
1206 	resp->tr_qm_dqalloc.tr_logres = xfs_calc_qm_dqalloc_reservation(mp,
1207 			false);
1208 	resp->tr_qm_dqalloc.tr_logcount = XFS_WRITE_LOG_COUNT;
1209 	resp->tr_qm_dqalloc.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
1210 
1211 	xfs_calc_namespace_reservations(mp, resp);
1212 
1213 	/*
1214 	 * The following transactions are logged in logical format with
1215 	 * a default log count.
1216 	 */
1217 	resp->tr_qm_setqlim.tr_logres = xfs_calc_qm_setqlim_reservation();
1218 	resp->tr_qm_setqlim.tr_logcount = XFS_DEFAULT_LOG_COUNT;
1219 
1220 	resp->tr_sb.tr_logres = xfs_calc_sb_reservation(mp);
1221 	resp->tr_sb.tr_logcount = XFS_DEFAULT_LOG_COUNT;
1222 
1223 	/* growdata requires permanent res; it can free space to the last AG */
1224 	resp->tr_growdata.tr_logres = xfs_calc_growdata_reservation(mp);
1225 	resp->tr_growdata.tr_logcount = XFS_DEFAULT_PERM_LOG_COUNT;
1226 	resp->tr_growdata.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
1227 
1228 	/* The following transactions are logged in logical format */
1229 	resp->tr_ichange.tr_logres = xfs_calc_ichange_reservation(mp);
1230 	resp->tr_fsyncts.tr_logres = xfs_calc_swrite_reservation(mp);
1231 	resp->tr_writeid.tr_logres = xfs_calc_writeid_reservation(mp);
1232 	resp->tr_attrsetrt.tr_logres = xfs_calc_attrsetrt_reservation(mp);
1233 	resp->tr_clearagi.tr_logres = xfs_calc_clear_agi_bucket_reservation(mp);
1234 	resp->tr_growrtzero.tr_logres = xfs_calc_growrtzero_reservation(mp);
1235 	resp->tr_growrtfree.tr_logres = xfs_calc_growrtfree_reservation(mp);
1236 
1237 	/*
1238 	 * Add one logcount for BUI items that appear with rmap or reflink,
1239 	 * one logcount for refcount intent items, and one logcount for rmap
1240 	 * intent items.
1241 	 */
1242 	if (xfs_has_reflink(mp) || xfs_has_rmapbt(mp))
1243 		logcount_adj++;
1244 	if (xfs_has_reflink(mp))
1245 		logcount_adj++;
1246 	if (xfs_has_rmapbt(mp))
1247 		logcount_adj++;
1248 
1249 	resp->tr_itruncate.tr_logcount += logcount_adj;
1250 	resp->tr_write.tr_logcount += logcount_adj;
1251 	resp->tr_qm_dqalloc.tr_logcount += logcount_adj;
1252 }
1253
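/*
 * Editor's sketch of how this table is wired up and consumed; it mirrors the
 * existing callers rather than adding anything new ("resblks" below is a
 * caller-supplied value):
 *
 *	xfs_trans_resv_calc(mp, M_RES(mp));	 (done once at mount time)
 *
 *	struct xfs_trans	*tp;
 *	int	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
 *					resblks, 0, 0, &tp);
 *
 * Each tr_* entry carries the byte reservation (tr_logres), the log count
 * used for permanent reservations (tr_logcount) and the permanent flag in
 * tr_logflags, all of which xfs_trans_alloc() hands to the log grant code.
 */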