// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * Copyright (C) 2010 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_bmap_btree.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_qm.h"
#include "xfs_trans_space.h"
#include "xfs_rtbitmap.h"
#include "xfs_attr_item.h"
#include "xfs_log.h"

#define _ALLOC	true
#define _FREE	false
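
/* Readability aliases for the @alloc argument of xfs_calc_inode_chunk_res(). */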

/*
 * A buffer has a format structure overhead in the log in addition
 * to the data, so we need to take this into account when reserving
 * space in a transaction for a buffer. Round the space required up
 * to a multiple of 128 bytes so that we don't change the historical
 * reservation that has been used for this overhead.
 */
STATIC uint
xfs_buf_log_overhead(void)
{
	return round_up(sizeof(struct xlog_op_header) +
			sizeof(struct xfs_buf_log_format), 128);
}

/*
 * Calculate the transaction log reservation per item in bytes.
 *
 * The nbufs argument is used to indicate the number of items that
 * will be changed in a transaction. size is used to tell how many
 * bytes should be reserved per item.
 */
STATIC uint
xfs_calc_buf_res(
	uint			nbufs,
	uint			size)
{
	return nbufs * (size + xfs_buf_log_overhead());
}

/*
 * Per-extent log reservation for the btree changes involved in freeing or
 * allocating an extent. In classic XFS two trees are modified (bnobt + cntbt);
 * with rmap enabled there is a third tree (rmapbt). The number of blocks
 * reserved is based on the formula:
 *
 * num trees * ((2 blocks/level * max depth) - 1)
 *
 * Keep in mind that max depth is calculated separately for each type of tree.
 */
uint
xfs_allocfree_block_count(
	struct xfs_mount	*mp,
	uint			num_ops)
{
	uint			blocks;

	blocks = num_ops * 2 * (2 * mp->m_alloc_maxlevels - 1);
	if (xfs_has_rmapbt(mp))
		blocks += num_ops * (2 * mp->m_rmap_maxlevels - 1);

	return blocks;
}

/*
 * Per-extent log reservation for refcount btree changes. These are never done
 * in the same transaction as an allocation or a free, so we compute them
 * separately.
 */
static unsigned int
xfs_refcountbt_block_count(
	struct xfs_mount	*mp,
	unsigned int		num_ops)
{
	return num_ops * (2 * mp->m_refc_maxlevels - 1);
}

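/*
 * Realtime counterpart of xfs_refcountbt_block_count(): per-extent log
 * reservation for realtime refcount btree changes.
 */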
static unsigned int
xfs_rtrefcountbt_block_count(
	struct xfs_mount	*mp,
	unsigned int		num_ops)
{
	return num_ops * (2 * mp->m_rtrefc_maxlevels - 1);
}

/*
 * Logging inodes is really tricky. They are logged in memory format,
 * which means that what we write into the log doesn't directly translate into
 * the amount of space they use on disk.
 *
 * Case in point - btree format forks in memory format use more space than the
 * on-disk format. In memory, the buffer contains a normal btree block header so
 * the btree code can treat it as though it is just another generic buffer.
 * However, when we write it to the inode fork, we don't write all of this
 * header as it isn't needed. e.g. the root is only ever in the inode, so
 * there's no need for sibling pointers which would waste 16 bytes of space.
 *
 * Hence when we have an inode with a maximally sized btree format fork, the
 * amount of information we actually log is greater than the size of the inode
 * on disk, so we need an inode reservation function that calculates all this
 * correctly. So, we log:
 *
 * - 4 log op headers for object
 *	- for the ilf, the inode core and 2 forks
 * - inode log format object
 * - the inode core
 * - two inode forks containing bmap btree root blocks.
 *	- the btree data contained by both forks will fit into the inode size,
 *	  hence when combined with the inode core above, we have a total of the
 *	  actual inode size.
 *	- the BMBT headers need to be accounted separately, as they are
 *	  additional to the records and pointers that fit inside the inode
 *	  forks.
 */
STATIC uint
xfs_calc_inode_res(
	struct xfs_mount	*mp,
	uint			ninodes)
{
	return ninodes *
		(4 * sizeof(struct xlog_op_header) +
		 sizeof(struct xfs_inode_log_format) +
		 mp->m_sb.sb_inodesize +
		 2 * xfs_bmbt_block_len(mp));
}

/*
 * Inode btree record insertion/removal modifies the inode btree and free space
 * btrees (since the inobt does not use the agfl). This requires the following
 * reservation:
 *
 * the inode btree: max depth * blocksize
 * the allocation btrees: 2 trees * (max depth - 1) * block size
 *
 * The caller must account for SB and AG header modifications, etc.
 */
STATIC uint
xfs_calc_inobt_res(
	struct xfs_mount	*mp)
{
	return xfs_calc_buf_res(M_IGEO(mp)->inobt_maxlevels,
				XFS_FSB_TO_B(mp, 1)) +
	       xfs_calc_buf_res(xfs_allocfree_block_count(mp, 1),
				XFS_FSB_TO_B(mp, 1));
}

/*
 * The free inode btree is a conditional feature. The behavior differs slightly
 * from that of the traditional inode btree in that the finobt tracks records
 * for inode chunks with at least one free inode. A record can be removed from
 * the tree during individual inode allocation. Therefore the finobt
 * reservation is unconditional for both the inode chunk allocation and
 * individual inode allocation (modify) cases.
 *
 * Behavior aside, the reservation for finobt modification is equivalent to the
 * traditional inobt: cover a full finobt shape change plus block allocation.
 */
STATIC uint
xfs_calc_finobt_res(
	struct xfs_mount	*mp)
{
	if (!xfs_has_finobt(mp))
		return 0;

	return xfs_calc_inobt_res(mp);
}

/*
 * Calculate the reservation required to allocate or free an inode chunk. This
 * includes:
 *
 * the allocation btrees: 2 trees * (max depth - 1) * block size
 * the inode chunk: m_ino_geo.ialloc_blks * N
 *
 * The size N of the inode chunk reservation depends on whether it is for
 * allocation or free and which type of create transaction is in use. An inode
 * chunk free always invalidates the buffers and only requires reservation for
 * headers (N == 0). An inode chunk allocation requires a chunk sized
 * reservation on v4 and older superblocks to initialize the chunk. No chunk
 * reservation is required for allocation on v5 supers, which use ordered
 * buffers to initialize.
 */
STATIC uint
xfs_calc_inode_chunk_res(
	struct xfs_mount	*mp,
	bool			alloc)
{
	uint			res, size = 0;

	res = xfs_calc_buf_res(xfs_allocfree_block_count(mp, 1),
			       XFS_FSB_TO_B(mp, 1));
	if (alloc) {
		/* icreate tx uses ordered buffers */
		if (xfs_has_v3inodes(mp))
			return res;
		size = XFS_FSB_TO_B(mp, 1);
	}

	res += xfs_calc_buf_res(M_IGEO(mp)->ialloc_blks, size);
	return res;
}

/*
 * Per-extent log reservation for the btree changes involved in freeing or
 * allocating a realtime extent. We have to be able to log as many rtbitmap
 * blocks as needed to mark in use XFS_MAX_BMBT_EXTLEN blocks' worth of realtime
 * extents, as well as the realtime summary block (t1). Realtime rmap btree
 * operations happen in a second transaction, so factor in a couple of rtrmapbt
 * splits (t2).
 */
static unsigned int
xfs_rtalloc_block_count(
	struct xfs_mount	*mp,
	unsigned int		num_ops)
{
	unsigned int		rtbmp_blocks;
	xfs_rtxlen_t		rtxlen;
	unsigned int		t1, t2 = 0;

	rtxlen = xfs_extlen_to_rtxlen(mp, XFS_MAX_BMBT_EXTLEN);
	rtbmp_blocks = xfs_rtbitmap_blockcount_len(mp, rtxlen);
	t1 = (rtbmp_blocks + 1) * num_ops;

	if (xfs_has_rmapbt(mp))
		t2 = num_ops * (2 * mp->m_rtrmap_maxlevels - 1);

	return max(t1, t2);
}

/*
 * Various log reservation values.
 *
 * These are based on the size of the file system block because that is what
 * most transactions manipulate. Each adds in an additional 128 bytes per
 * item logged to try to account for the overhead of the transaction mechanism.
 *
 * Note: Most of the reservations underestimate the number of allocation
 * groups into which they could free extents in the xfs_defer_finish() call.
 * This is because the number in the worst case is quite high and quite
 * unusual. In order to fix this we need to change xfs_defer_finish() to free
 * extents in only a single AG at a time. This will require changes to the
 * EFI code as well, however, so that the EFI for the extents not freed is
 * logged again in each transaction. See SGI PV #261917.
 *
 * Reservation functions here avoid a huge stack in xfs_trans_init due to
 * register overflow from temporaries in the calculations.
 */

/*
 * Compute the log reservation required to handle the refcount update
 * transaction. Refcount updates are always done via deferred log items.
 *
 * This is calculated as the max of:
 * Data device refcount updates (t1):
 *    the agfs of the ags containing the blocks: nr_ops * sector size
 *    the refcount btrees: nr_ops * 1 trees * (2 * max depth - 1) * block size
 * Realtime refcount updates (t2):
 *    the rt refcount inode
 *    the rtrefcount btrees: nr_ops * 1 trees * (2 * max depth - 1) * block size
 */
static unsigned int
xfs_calc_refcountbt_reservation(
	struct xfs_mount	*mp,
	unsigned int		nr_ops)
{
	unsigned int		blksz = XFS_FSB_TO_B(mp, 1);
	unsigned int		t1, t2 = 0;

	if (!xfs_has_reflink(mp))
		return 0;

	t1 = xfs_calc_buf_res(nr_ops, mp->m_sb.sb_sectsize) +
	     xfs_calc_buf_res(xfs_refcountbt_block_count(mp, nr_ops), blksz);

	if (xfs_has_realtime(mp))
		t2 = xfs_calc_inode_res(mp, 1) +
		     xfs_calc_buf_res(xfs_rtrefcountbt_block_count(mp, nr_ops),
				      blksz);

	return max(t1, t2);
}

/*
 * In a write transaction we can allocate a maximum of 2
 * extents. This gives (t1):
 *    the inode getting the new extents: inode size
 *    the inode's bmap btree: max depth * block size
 *    the agfs of the ags from which the extents are allocated: 2 * sector size
 *    the superblock free block counter: sector size
 *    the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
 * Or, if we're writing to a realtime file (t2):
 *    the inode getting the new extents: inode size
 *    the inode's bmap btree: max depth * block size
 *    the agfs of the ags from which the extents are allocated: 2 * sector size
 *    the superblock free block counter: sector size
 *    the realtime bitmap: ((XFS_MAX_BMBT_EXTLEN / rtextsize) / NBBY) bytes
 *    the realtime summary: 1 block
 *    the allocation btrees: 2 trees * (2 * max depth - 1) * block size
 * And the bmap_finish transaction can free bmap blocks in a join (t3):
 *    the agfs of the ags containing the blocks: 2 * sector size
 *    the agfls of the ags containing the blocks: 2 * sector size
 *    the super block free block counter: sector size
 *    the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
 * And any refcount updates that happen in a separate transaction (t4).
 */
STATIC uint
xfs_calc_write_reservation(
	struct xfs_mount	*mp,
	bool			for_minlogsize)
{
	unsigned int		t1, t2, t3, t4;
	unsigned int		blksz = XFS_FSB_TO_B(mp, 1);

	t1 = xfs_calc_inode_res(mp, 1) +
	     xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK), blksz) +
	     xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
	     xfs_calc_buf_res(xfs_allocfree_block_count(mp, 2), blksz);

	if (xfs_has_realtime(mp)) {
		t2 = xfs_calc_inode_res(mp, 1) +
		     xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK),
				      blksz) +
		     xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
		     xfs_calc_buf_res(xfs_rtalloc_block_count(mp, 1), blksz) +
		     xfs_calc_buf_res(xfs_allocfree_block_count(mp, 1), blksz);
	} else {
		t2 = 0;
	}

	t3 = xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
	     xfs_calc_buf_res(xfs_allocfree_block_count(mp, 2), blksz);

	/*
	 * In the early days of reflink, we included enough reservation to log
	 * two refcountbt splits for each transaction. The codebase runs
	 * refcountbt updates in separate transactions now, so to compute the
	 * minimum log size, add the refcountbtree splits back to t1 and t3 and
	 * do not account them separately as t4. Reflink did not support
	 * realtime when the reservations were established, so no adjustment to
	 * t2 is needed.
	 */
	if (for_minlogsize) {
		unsigned int	adj = 0;

		if (xfs_has_reflink(mp))
			adj = xfs_calc_buf_res(
					xfs_refcountbt_block_count(mp, 2),
					blksz);
		t1 += adj;
		t3 += adj;
		return XFS_DQUOT_LOGRES + max3(t1, t2, t3);
	}

	t4 = xfs_calc_refcountbt_reservation(mp, 1);
	return XFS_DQUOT_LOGRES + max(t4, max3(t1, t2, t3));
}

unsigned int
xfs_calc_write_reservation_minlogsize(
	struct xfs_mount	*mp)
{
	return xfs_calc_write_reservation(mp, true);
}

/*
 * In truncating a file we free up to two extents at once. We can modify (t1):
 *    the inode being truncated: inode size
 *    the inode's bmap btree: (max depth + 1) * block size
 * And the bmap_finish transaction can free the blocks and bmap blocks (t2):
 *    the agf for each of the ags: 4 * sector size
 *    the agfl for each of the ags: 4 * sector size
 *    the super block to reflect the freed blocks: sector size
 *    worst case split in allocation btrees per extent assuming 4 extents:
 *		4 exts * 2 trees * (2 * max depth - 1) * block size
 * Or, if it's a realtime file (t3):
 *    the agf for each of the ags: 2 * sector size
 *    the agfl for each of the ags: 2 * sector size
 *    the super block to reflect the freed blocks: sector size
 *    the realtime bitmap:
 *		2 exts * ((XFS_MAX_BMBT_EXTLEN / rtextsize) / NBBY) bytes
 *    the realtime summary: 2 exts * 1 block
 *    worst case split in allocation btrees per extent assuming 2 extents:
 *		2 exts * 2 trees * (2 * max depth - 1) * block size
 * And any refcount updates that happen in a separate transaction (t4).
 */
STATIC uint
xfs_calc_itruncate_reservation(
	struct xfs_mount	*mp,
	bool			for_minlogsize)
{
	unsigned int		t1, t2, t3, t4;
	unsigned int		blksz = XFS_FSB_TO_B(mp, 1);

	t1 = xfs_calc_inode_res(mp, 1) +
	     xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1, blksz);

	t2 = xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) +
	     xfs_calc_buf_res(xfs_allocfree_block_count(mp, 4), blksz);

	if (xfs_has_realtime(mp)) {
		t3 = xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
		     xfs_calc_buf_res(xfs_rtalloc_block_count(mp, 2), blksz) +
		     xfs_calc_buf_res(xfs_allocfree_block_count(mp, 2), blksz);
	} else {
		t3 = 0;
	}

	/*
	 * In the early days of reflink, we included enough reservation to log
	 * four refcountbt splits in the same transaction as bnobt/cntbt
	 * updates. The codebase runs refcountbt updates in separate
	 * transactions now, so to compute the minimum log size, add the
	 * refcount btree splits back here and do not compute them separately
	 * as t4. Reflink did not support realtime when the reservations were
	 * established, so do not adjust t3.
	 */
	if (for_minlogsize) {
		if (xfs_has_reflink(mp))
			t2 += xfs_calc_buf_res(
					xfs_refcountbt_block_count(mp, 4),
					blksz);

		return XFS_DQUOT_LOGRES + max3(t1, t2, t3);
	}

	t4 = xfs_calc_refcountbt_reservation(mp, 2);
	return XFS_DQUOT_LOGRES + max(t4, max3(t1, t2, t3));
}

unsigned int
xfs_calc_itruncate_reservation_minlogsize(
	struct xfs_mount	*mp)
{
	return xfs_calc_itruncate_reservation(mp, true);
}

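/*
 * Log space overhead of the xattr intent (attri) items used to track parent
 * pointer updates: one parent record and one name for a link or unlink, and
 * two of each for a replace.
 */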
static inline unsigned int xfs_calc_pptr_link_overhead(void)
{
	return sizeof(struct xfs_attri_log_format) +
			xlog_calc_iovec_len(sizeof(struct xfs_parent_rec)) +
			xlog_calc_iovec_len(MAXNAMELEN - 1);
}
static inline unsigned int xfs_calc_pptr_unlink_overhead(void)
{
	return sizeof(struct xfs_attri_log_format) +
			xlog_calc_iovec_len(sizeof(struct xfs_parent_rec)) +
			xlog_calc_iovec_len(MAXNAMELEN - 1);
}
static inline unsigned int xfs_calc_pptr_replace_overhead(void)
{
	return sizeof(struct xfs_attri_log_format) +
			xlog_calc_iovec_len(sizeof(struct xfs_parent_rec)) +
			xlog_calc_iovec_len(MAXNAMELEN - 1) +
			xlog_calc_iovec_len(sizeof(struct xfs_parent_rec)) +
			xlog_calc_iovec_len(MAXNAMELEN - 1);
}

/*
 * In renaming files we can modify (t1):
 *    the five inodes involved: 5 * inode size
 *    the two directory btrees: 2 * (max depth + v2) * dir block size
 *    the two directory bmap btrees: 2 * max depth * block size
 * And the bmap_finish transaction can free dir and bmap blocks (two sets
 *	of bmap blocks) giving (t2):
 *    the agf for the ags in which the blocks live: 3 * sector size
 *    the agfl for the ags in which the blocks live: 3 * sector size
 *    the superblock for the free block count: sector size
 *    the allocation btrees: 3 exts * 2 trees * (2 * max depth - 1) * block size
 * If parent pointers are enabled (t3), then each transaction in the chain
 *    must be capable of setting or removing the extended attribute
 *    containing the parent information. It must also be able to handle
 *    the three xattr intent items that track the progress of the parent
 *    pointer update.
 */
STATIC uint
xfs_calc_rename_reservation(
	struct xfs_mount	*mp)
{
	unsigned int		overhead = XFS_DQUOT_LOGRES;
	struct xfs_trans_resv	*resp = M_RES(mp);
	unsigned int		t1, t2, t3 = 0;

	t1 = xfs_calc_inode_res(mp, 5) +
	     xfs_calc_buf_res(2 * XFS_DIROP_LOG_COUNT(mp),
			      XFS_FSB_TO_B(mp, 1));

	t2 = xfs_calc_buf_res(7, mp->m_sb.sb_sectsize) +
	     xfs_calc_buf_res(xfs_allocfree_block_count(mp, 3),
			      XFS_FSB_TO_B(mp, 1));

	if (xfs_has_parent(mp)) {
		unsigned int	rename_overhead, exchange_overhead;

		t3 = max(resp->tr_attrsetm.tr_logres,
			 resp->tr_attrrm.tr_logres);

		/*
		 * For a standard rename, the three xattr intent log items
		 * are (1) replacing the pptr for the source file; (2)
		 * removing the pptr on the dest file; and (3) adding a
		 * pptr for the whiteout file in the src dir.
		 *
		 * For a RENAME_EXCHANGE, there are two xattr intent
		 * items to replace the pptr for both src and dest
		 * files. Link counts don't change and there is no
		 * whiteout.
		 *
		 * In the worst case we can end up relogging all log
		 * intent items to allow the log tail to move ahead, so
		 * they become overhead added to each transaction in a
		 * processing chain.
		 */
		rename_overhead = xfs_calc_pptr_replace_overhead() +
				  xfs_calc_pptr_unlink_overhead() +
				  xfs_calc_pptr_link_overhead();
		exchange_overhead = 2 * xfs_calc_pptr_replace_overhead();

		overhead += max(rename_overhead, exchange_overhead);
	}

	return overhead + max3(t1, t2, t3);
}

static inline unsigned int
xfs_rename_log_count(
	struct xfs_mount	*mp,
	struct xfs_trans_resv	*resp)
{
	/* One for the rename, one more for freeing blocks */
	unsigned int		ret = XFS_RENAME_LOG_COUNT;

	/*
	 * Pre-reserve enough log reservation to handle the transaction
	 * rolling needed to remove or add one parent pointer.
	 */
	if (xfs_has_parent(mp))
		ret += max(resp->tr_attrsetm.tr_logcount,
			   resp->tr_attrrm.tr_logcount);

	return ret;
}

/*
 * For removing an inode from the unlinked list, we can modify:
 *    the agi hash list and counters: sector size
 *    the on disk inode before ours in the agi hash list: inode cluster size
 *    the on disk inode in the agi hash list: inode cluster size
 */
STATIC uint
xfs_calc_iunlink_remove_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
	       2 * M_IGEO(mp)->inode_cluster_size;
}

static inline unsigned int
xfs_link_log_count(
	struct xfs_mount	*mp,
	struct xfs_trans_resv	*resp)
{
	unsigned int		ret = XFS_LINK_LOG_COUNT;

	/*
	 * Pre-reserve enough log reservation to handle the transaction
	 * rolling needed to add one parent pointer.
	 */
	if (xfs_has_parent(mp))
		ret += resp->tr_attrsetm.tr_logcount;

	return ret;
}

/*
 * For creating a link to an inode:
 *    the parent directory inode: inode size
 *    the linked inode: inode size
 *    the directory btree could split: (max depth + v2) * dir block size
 *    the directory bmap btree could join or split: (max depth + v2) * blocksize
 * And the bmap_finish transaction can free some bmap blocks giving:
 *    the agf for the ag in which the blocks live: sector size
 *    the agfl for the ag in which the blocks live: sector size
 *    the superblock for the free block count: sector size
 *    the allocation btrees: 2 trees * (2 * max depth - 1) * block size
 */
STATIC uint
xfs_calc_link_reservation(
	struct xfs_mount	*mp)
{
	unsigned int		overhead = XFS_DQUOT_LOGRES;
	struct xfs_trans_resv	*resp = M_RES(mp);
	unsigned int		t1, t2, t3 = 0;

	overhead += xfs_calc_iunlink_remove_reservation(mp);
	t1 = xfs_calc_inode_res(mp, 2) +
	     xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp), XFS_FSB_TO_B(mp, 1));
	t2 = xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
	     xfs_calc_buf_res(xfs_allocfree_block_count(mp, 1),
			      XFS_FSB_TO_B(mp, 1));

	if (xfs_has_parent(mp)) {
		t3 = resp->tr_attrsetm.tr_logres;
		overhead += xfs_calc_pptr_link_overhead();
	}

	return overhead + max3(t1, t2, t3);
}

/*
 * For adding an inode to the unlinked list we can modify:
 *    the agi hash list: sector size
 *    the on disk inode: inode cluster size
 */
STATIC uint
xfs_calc_iunlink_add_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
			M_IGEO(mp)->inode_cluster_size;
}

static inline unsigned int
xfs_remove_log_count(
	struct xfs_mount	*mp,
	struct xfs_trans_resv	*resp)
{
	unsigned int		ret = XFS_REMOVE_LOG_COUNT;

	/*
	 * Pre-reserve enough log reservation to handle the transaction
	 * rolling needed to add one parent pointer.
	 */
	if (xfs_has_parent(mp))
		ret += resp->tr_attrrm.tr_logcount;

	return ret;
}

/*
 * For removing a directory entry we can modify:
 *    the parent directory inode: inode size
 *    the removed inode: inode size
 *    the directory btree could join: (max depth + v2) * dir block size
 *    the directory bmap btree could join or split: (max depth + v2) * blocksize
 * And the bmap_finish transaction can free the dir and bmap blocks giving:
 *    the agf for the ag in which the blocks live: 2 * sector size
 *    the agfl for the ag in which the blocks live: 2 * sector size
 *    the superblock for the free block count: sector size
 *    the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
 */
STATIC uint
xfs_calc_remove_reservation(
	struct xfs_mount	*mp)
{
	unsigned int		overhead = XFS_DQUOT_LOGRES;
	struct xfs_trans_resv	*resp = M_RES(mp);
	unsigned int		t1, t2, t3 = 0;

	overhead += xfs_calc_iunlink_add_reservation(mp);

	t1 = xfs_calc_inode_res(mp, 2) +
	     xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp), XFS_FSB_TO_B(mp, 1));
	t2 = xfs_calc_buf_res(4, mp->m_sb.sb_sectsize) +
	     xfs_calc_buf_res(xfs_allocfree_block_count(mp, 2),
			      XFS_FSB_TO_B(mp, 1));

	if (xfs_has_parent(mp)) {
		t3 = resp->tr_attrrm.tr_logres;
		overhead += xfs_calc_pptr_unlink_overhead();
	}

	return overhead + max3(t1, t2, t3);
}

/*
 * For create, break it into the two cases that the transaction
 * covers. We start with the modify case - allocation done by modification
 * of the state of existing inodes - and the allocation case.
 */

/*
 * For create we can modify:
 *    the parent directory inode: inode size
 *    the new inode: inode size
 *    the inode btree entry: block size
 *    the superblock for the nlink flag: sector size
 *    the directory btree: (max depth + v2) * dir block size
 *    the directory inode's bmap btree: (max depth + v2) * block size
 *    the finobt (record modification and allocation btrees)
 */
STATIC uint
xfs_calc_create_resv_modify(
	struct xfs_mount	*mp)
{
	return xfs_calc_inode_res(mp, 2) +
		xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
		(uint)XFS_FSB_TO_B(mp, 1) +
		xfs_calc_buf_res(XFS_DIROP_LOG_COUNT(mp), XFS_FSB_TO_B(mp, 1)) +
		xfs_calc_finobt_res(mp);
}

/*
 * For icreate we can allocate some inodes giving:
 *    the agi and agf of the ag getting the new inodes: 2 * sectorsize
 *    the superblock for the nlink flag: sector size
 *    the inode chunk (allocation, optional init)
 *    the inobt (record insertion)
 *    the finobt (optional, record insertion)
 */
STATIC uint
xfs_calc_icreate_resv_alloc(
	struct xfs_mount	*mp)
{
	return xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
		mp->m_sb.sb_sectsize +
		xfs_calc_inode_chunk_res(mp, _ALLOC) +
		xfs_calc_inobt_res(mp) +
		xfs_calc_finobt_res(mp);
}

static inline unsigned int
xfs_icreate_log_count(
	struct xfs_mount	*mp,
	struct xfs_trans_resv	*resp)
{
	unsigned int		ret = XFS_CREATE_LOG_COUNT;

	/*
	 * Pre-reserve enough log reservation to handle the transaction
	 * rolling needed to add one parent pointer.
	 */
	if (xfs_has_parent(mp))
		ret += resp->tr_attrsetm.tr_logcount;

	return ret;
}

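/*
 * Reservation for creating a new inode: the larger of the chunk allocation
 * and existing-chunk modification cases above, plus the parent pointer
 * attribute set when parent pointers are enabled.
 */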
STATIC uint
xfs_calc_icreate_reservation(
	struct xfs_mount	*mp)
{
	struct xfs_trans_resv	*resp = M_RES(mp);
	unsigned int		overhead = XFS_DQUOT_LOGRES;
	unsigned int		t1, t2, t3 = 0;

	t1 = xfs_calc_icreate_resv_alloc(mp);
	t2 = xfs_calc_create_resv_modify(mp);

	if (xfs_has_parent(mp)) {
		t3 = resp->tr_attrsetm.tr_logres;
		overhead += xfs_calc_pptr_link_overhead();
	}

	return overhead + max3(t1, t2, t3);
}

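/*
 * Creating a temporary file is the allocation half of icreate plus adding the
 * new inode to the AGI unlinked list.
 */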
STATIC uint
xfs_calc_create_tmpfile_reservation(
	struct xfs_mount	*mp)
{
	uint			res = XFS_DQUOT_LOGRES;

	res += xfs_calc_icreate_resv_alloc(mp);
	return res + xfs_calc_iunlink_add_reservation(mp);
}

static inline unsigned int
xfs_mkdir_log_count(
	struct xfs_mount	*mp,
	struct xfs_trans_resv	*resp)
{
	unsigned int		ret = XFS_MKDIR_LOG_COUNT;

	/*
	 * Pre-reserve enough log reservation to handle the transaction
	 * rolling needed to add one parent pointer.
	 */
	if (xfs_has_parent(mp))
		ret += resp->tr_attrsetm.tr_logcount;

	return ret;
}

/*
 * Making a new directory is the same as creating a new file.
 */
STATIC uint
xfs_calc_mkdir_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_icreate_reservation(mp);
}

static inline unsigned int
xfs_symlink_log_count(
	struct xfs_mount	*mp,
	struct xfs_trans_resv	*resp)
{
	unsigned int		ret = XFS_SYMLINK_LOG_COUNT;

	/*
	 * Pre-reserve enough log reservation to handle the transaction
	 * rolling needed to add one parent pointer.
	 */
	if (xfs_has_parent(mp))
		ret += resp->tr_attrsetm.tr_logcount;

	return ret;
}

/*
 * Making a new symlink is the same as creating a new file, but
 * with the added blocks for remote symlink data which can be up to 1kB in
 * length (XFS_SYMLINK_MAXLEN).
 */
STATIC uint
xfs_calc_symlink_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_icreate_reservation(mp) +
	       xfs_calc_buf_res(1, XFS_SYMLINK_MAXLEN);
}

/*
 * In freeing an inode we can modify:
 *    the inode being freed: inode size
 *    the super block free inode counter, AGF and AGFL: sector size
 *    the on disk inode (agi unlinked list removal)
 *    the inode chunk (invalidated, headers only)
 *    the inode btree
 *    the finobt (record insertion, removal or modification)
 *
 * Note that the inode chunk res. includes an allocfree res. for freeing of the
 * inode chunk. This is technically extraneous because the inode chunk free is
 * deferred (it occurs after a transaction roll). Include the extra reservation
 * anyway since we've had reports of ifree transaction overruns due to too many
 * agfl fixups during inode chunk frees.
 */
STATIC uint
xfs_calc_ifree_reservation(
	struct xfs_mount	*mp)
{
	return XFS_DQUOT_LOGRES +
		xfs_calc_inode_res(mp, 1) +
		xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
		xfs_calc_iunlink_remove_reservation(mp) +
		xfs_calc_inode_chunk_res(mp, _FREE) +
		xfs_calc_inobt_res(mp) +
		xfs_calc_finobt_res(mp);
}

/*
 * When only changing the inode we log the inode and possibly the superblock.
 * We also add a bit of slop for the transaction stuff.
 */
STATIC uint
xfs_calc_ichange_reservation(
	struct xfs_mount	*mp)
{
	return XFS_DQUOT_LOGRES +
		xfs_calc_inode_res(mp, 1) +
		xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
}

/*
 * Growing the data section of the filesystem.
 *	superblock
 *	agi and agf
 *	allocation btrees
 */
STATIC uint
xfs_calc_growdata_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
		xfs_calc_buf_res(xfs_allocfree_block_count(mp, 1),
				 XFS_FSB_TO_B(mp, 1));
}

/*
 * Growing the rt section of the filesystem.
 * In the first set of transactions (ALLOC) we allocate space to the
 * bitmap or summary files.
 *	superblock: sector size
 *	agf of the ag from which the extent is allocated: sector size
 *	bmap btree for bitmap/summary inode: max depth * blocksize
 *	bitmap/summary inode: inode size
 *	allocation btrees for 1 block alloc: 2 * (2 * maxdepth - 1) * blocksize
 */
STATIC uint
xfs_calc_growrtalloc_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
		xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK),
				 XFS_FSB_TO_B(mp, 1)) +
		xfs_calc_inode_res(mp, 1) +
		xfs_calc_buf_res(xfs_allocfree_block_count(mp, 1),
				 XFS_FSB_TO_B(mp, 1));
}

/*
 * Growing the rt section of the filesystem.
 * In the second set of transactions (ZERO) we zero the new metadata blocks.
 *	one bitmap/summary block: blocksize
 */
STATIC uint
xfs_calc_growrtzero_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_buf_res(1, mp->m_sb.sb_blocksize);
}

/*
 * Growing the rt section of the filesystem.
 * In the third set of transactions (FREE) we update metadata without
 * allocating any new blocks.
 *	superblock: sector size
 *	bitmap inode: inode size
 *	summary inode: inode size
 *	one bitmap block: blocksize
 *	summary blocks: new summary size
 */
STATIC uint
xfs_calc_growrtfree_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
		xfs_calc_inode_res(mp, 2) +
		xfs_calc_buf_res(1, mp->m_sb.sb_blocksize) +
		xfs_calc_buf_res(1, XFS_FSB_TO_B(mp, mp->m_rsumblocks));
}

/*
 * Logging the inode modification timestamp on a synchronous write.
 *	inode
 */
STATIC uint
xfs_calc_swrite_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_inode_res(mp, 1);
}

/*
 * Logging the inode mode bits when writing a setuid/setgid file
 *	inode
 */
STATIC uint
xfs_calc_writeid_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_inode_res(mp, 1);
}

/*
 * Converting the inode from non-attributed to attributed.
 *	the inode being converted: inode size
 *	agf block and superblock (for block allocation)
 *	the new block (directory sized)
 *	bmap blocks for the new directory block
 *	allocation btrees
 */
STATIC uint
xfs_calc_addafork_reservation(
	struct xfs_mount	*mp)
{
	return XFS_DQUOT_LOGRES +
		xfs_calc_inode_res(mp, 1) +
		xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
		xfs_calc_buf_res(1, mp->m_dir_geo->blksize) +
		xfs_calc_buf_res(XFS_DAENTER_BMAP1B(mp, XFS_DATA_FORK) + 1,
				 XFS_FSB_TO_B(mp, 1)) +
		xfs_calc_buf_res(xfs_allocfree_block_count(mp, 1),
				 XFS_FSB_TO_B(mp, 1));
}

/*
 * Removing the attribute fork of a file
 *    the inode being truncated: inode size
 *    the inode's bmap btree: max depth * block size
 * And the bmap_finish transaction can free the blocks and bmap blocks:
 *    the agf for each of the ags: 4 * sector size
 *    the agfl for each of the ags: 4 * sector size
 *    the super block to reflect the freed blocks: sector size
 *    worst case split in allocation btrees per extent assuming 4 extents:
 *		4 exts * 2 trees * (2 * max depth - 1) * block size
 */
STATIC uint
xfs_calc_attrinval_reservation(
	struct xfs_mount	*mp)
{
	return max((xfs_calc_inode_res(mp, 1) +
		    xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK),
				     XFS_FSB_TO_B(mp, 1))),
		   (xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) +
		    xfs_calc_buf_res(xfs_allocfree_block_count(mp, 4),
				     XFS_FSB_TO_B(mp, 1))));
}

/*
 * Setting an attribute at mount time.
 *	the inode getting the attribute
 *	the superblock for allocations
 *	the agfs extents are allocated from
 *	the attribute btree * max depth
 *	the inode allocation btree
 * Since attribute transaction space is dependent on the size of the attribute,
 * the calculation is done partially at mount time and partially at runtime (see
 * below).
 */
STATIC uint
xfs_calc_attrsetm_reservation(
	struct xfs_mount	*mp)
{
	return XFS_DQUOT_LOGRES +
		xfs_calc_inode_res(mp, 1) +
		xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
		xfs_calc_buf_res(XFS_DA_NODE_MAXDEPTH, XFS_FSB_TO_B(mp, 1));
}

/*
 * Setting an attribute at runtime, transaction space unit per block.
 *	the superblock for allocations: sector size
 *	the inode bmap btree could join or split: max depth * block size
 * Since the runtime attribute transaction space is dependent on the total
 * blocks needed for the 1st bmap, here we calculate the space unit for
 * one block so that the caller can figure out the total space according
 * to the attribute extent length in blocks by:
 *	ext * M_RES(mp)->tr_attrsetrt.tr_logres
 */
STATIC uint
xfs_calc_attrsetrt_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
		xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK),
				 XFS_FSB_TO_B(mp, 1));
}

/*
 * Removing an attribute.
 *    the inode: inode size
 *    the attribute btree could join: max depth * block size
 *    the inode bmap btree could join or split: max depth * block size
 * And the bmap_finish transaction can free the attr blocks freed giving:
 *    the agf for the ag in which the blocks live: 2 * sector size
 *    the agfl for the ag in which the blocks live: 2 * sector size
 *    the superblock for the free block count: sector size
 *    the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
 */
STATIC uint
xfs_calc_attrrm_reservation(
	struct xfs_mount	*mp)
{
	return XFS_DQUOT_LOGRES +
		max((xfs_calc_inode_res(mp, 1) +
		     xfs_calc_buf_res(XFS_DA_NODE_MAXDEPTH,
				      XFS_FSB_TO_B(mp, 1)) +
		     (uint)XFS_FSB_TO_B(mp,
					XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)) +
		     xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK), 0)),
		    (xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
		     xfs_calc_buf_res(xfs_allocfree_block_count(mp, 2),
				      XFS_FSB_TO_B(mp, 1))));
}

/*
 * Clearing a bad agino number in an agi hash bucket.
 */
STATIC uint
xfs_calc_clear_agi_bucket_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
}

/*
 * Adjusting quota limits.
 *    the disk quota buffer: sizeof(struct xfs_disk_dquot)
 */
STATIC uint
xfs_calc_qm_setqlim_reservation(void)
{
	return xfs_calc_buf_res(1, sizeof(struct xfs_disk_dquot));
}

/*
 * Allocating quota on disk if needed.
 *	the write transaction log space for quota file extent allocation
 *	the unit of quota allocation: one system block size
 */
STATIC uint
xfs_calc_qm_dqalloc_reservation(
	struct xfs_mount	*mp,
	bool			for_minlogsize)
{
	return xfs_calc_write_reservation(mp, for_minlogsize) +
		xfs_calc_buf_res(1,
			XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) - 1);
}

unsigned int
xfs_calc_qm_dqalloc_reservation_minlogsize(
	struct xfs_mount	*mp)
{
	return xfs_calc_qm_dqalloc_reservation(mp, true);
}

/*
 * Syncing the incore super block changes to disk.
 *    the super block to reflect the changes: sector size
 */
STATIC uint
xfs_calc_sb_reservation(
	struct xfs_mount	*mp)
{
	return xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
}

/*
 * Namespace reservations.
 *
 * These get tricky when parent pointers are enabled as we have attribute
 * modifications occurring from within these transactions. Rather than confuse
 * each of these reservation calculations with the conditional attribute
 * reservations, add them here in a clear and concise manner. This requires that
 * the attribute reservations have already been calculated.
 *
 * Note that we only include the static attribute reservation here; the runtime
 * reservation will have to be modified by the size of the attributes being
 * added/removed/modified. See the comments on the attribute reservation
 * calculations for more details.
 */
STATIC void
xfs_calc_namespace_reservations(
	struct xfs_mount	*mp,
	struct xfs_trans_resv	*resp)
{
	ASSERT(resp->tr_attrsetm.tr_logres > 0);

	resp->tr_rename.tr_logres = xfs_calc_rename_reservation(mp);
	resp->tr_rename.tr_logcount = xfs_rename_log_count(mp, resp);
	resp->tr_rename.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_link.tr_logres = xfs_calc_link_reservation(mp);
	resp->tr_link.tr_logcount = xfs_link_log_count(mp, resp);
	resp->tr_link.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_remove.tr_logres = xfs_calc_remove_reservation(mp);
	resp->tr_remove.tr_logcount = xfs_remove_log_count(mp, resp);
	resp->tr_remove.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_symlink.tr_logres = xfs_calc_symlink_reservation(mp);
	resp->tr_symlink.tr_logcount = xfs_symlink_log_count(mp, resp);
	resp->tr_symlink.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_create.tr_logres = xfs_calc_icreate_reservation(mp);
	resp->tr_create.tr_logcount = xfs_icreate_log_count(mp, resp);
	resp->tr_create.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_mkdir.tr_logres = xfs_calc_mkdir_reservation(mp);
	resp->tr_mkdir.tr_logcount = xfs_mkdir_log_count(mp, resp);
	resp->tr_mkdir.tr_logflags |= XFS_TRANS_PERM_LOG_RES;
}

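/*
 * Populate the transaction reservation structure for this mount with the log
 * space, log count and log flags for every transaction type.
 */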
void
xfs_trans_resv_calc(
	struct xfs_mount	*mp,
	struct xfs_trans_resv	*resp)
{
	int			logcount_adj = 0;

	/*
	 * The following transactions are logged in physical format and
	 * require a permanent reservation on space.
	 */
	resp->tr_write.tr_logres = xfs_calc_write_reservation(mp, false);
	resp->tr_write.tr_logcount = XFS_WRITE_LOG_COUNT;
	resp->tr_write.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_itruncate.tr_logres = xfs_calc_itruncate_reservation(mp, false);
	resp->tr_itruncate.tr_logcount = XFS_ITRUNCATE_LOG_COUNT;
	resp->tr_itruncate.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_create_tmpfile.tr_logres =
		xfs_calc_create_tmpfile_reservation(mp);
	resp->tr_create_tmpfile.tr_logcount = XFS_CREATE_TMPFILE_LOG_COUNT;
	resp->tr_create_tmpfile.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_ifree.tr_logres = xfs_calc_ifree_reservation(mp);
	resp->tr_ifree.tr_logcount = XFS_INACTIVE_LOG_COUNT;
	resp->tr_ifree.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_addafork.tr_logres = xfs_calc_addafork_reservation(mp);
	resp->tr_addafork.tr_logcount = XFS_ADDAFORK_LOG_COUNT;
	resp->tr_addafork.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_attrinval.tr_logres = xfs_calc_attrinval_reservation(mp);
	resp->tr_attrinval.tr_logcount = XFS_ATTRINVAL_LOG_COUNT;
	resp->tr_attrinval.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_attrsetm.tr_logres = xfs_calc_attrsetm_reservation(mp);
	resp->tr_attrsetm.tr_logcount = XFS_ATTRSET_LOG_COUNT;
	resp->tr_attrsetm.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_attrrm.tr_logres = xfs_calc_attrrm_reservation(mp);
	resp->tr_attrrm.tr_logcount = XFS_ATTRRM_LOG_COUNT;
	resp->tr_attrrm.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_growrtalloc.tr_logres = xfs_calc_growrtalloc_reservation(mp);
	resp->tr_growrtalloc.tr_logcount = XFS_DEFAULT_PERM_LOG_COUNT;
	resp->tr_growrtalloc.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	resp->tr_qm_dqalloc.tr_logres = xfs_calc_qm_dqalloc_reservation(mp,
			false);
	resp->tr_qm_dqalloc.tr_logcount = XFS_WRITE_LOG_COUNT;
	resp->tr_qm_dqalloc.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	xfs_calc_namespace_reservations(mp, resp);

	/*
	 * The following transactions are logged in logical format with
	 * a default log count.
	 */
	resp->tr_qm_setqlim.tr_logres = xfs_calc_qm_setqlim_reservation();
	resp->tr_qm_setqlim.tr_logcount = XFS_DEFAULT_LOG_COUNT;

	resp->tr_sb.tr_logres = xfs_calc_sb_reservation(mp);
	resp->tr_sb.tr_logcount = XFS_DEFAULT_LOG_COUNT;

	/* growdata requires permanent res; it can free space to the last AG */
	resp->tr_growdata.tr_logres = xfs_calc_growdata_reservation(mp);
	resp->tr_growdata.tr_logcount = XFS_DEFAULT_PERM_LOG_COUNT;
	resp->tr_growdata.tr_logflags |= XFS_TRANS_PERM_LOG_RES;

	/* The following transactions are logged in logical format */
	resp->tr_ichange.tr_logres = xfs_calc_ichange_reservation(mp);
	resp->tr_fsyncts.tr_logres = xfs_calc_swrite_reservation(mp);
	resp->tr_writeid.tr_logres = xfs_calc_writeid_reservation(mp);
	resp->tr_attrsetrt.tr_logres = xfs_calc_attrsetrt_reservation(mp);
	resp->tr_clearagi.tr_logres = xfs_calc_clear_agi_bucket_reservation(mp);
	resp->tr_growrtzero.tr_logres = xfs_calc_growrtzero_reservation(mp);
	resp->tr_growrtfree.tr_logres = xfs_calc_growrtfree_reservation(mp);

	/*
	 * Add one logcount for BUI items that appear with rmap or reflink,
	 * one logcount for refcount intent items, and one logcount for rmap
	 * intent items.
	 */
	if (xfs_has_reflink(mp) || xfs_has_rmapbt(mp))
		logcount_adj++;
	if (xfs_has_reflink(mp))
		logcount_adj++;
	if (xfs_has_rmapbt(mp))
		logcount_adj++;

	resp->tr_itruncate.tr_logcount += logcount_adj;
	resp->tr_write.tr_logcount += logcount_adj;
	resp->tr_qm_dqalloc.tr_logcount += logcount_adj;
}