// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#ifndef __XFS_DEFER_H__
#define	__XFS_DEFER_H__

struct xfs_defer_op_type;

/*
 * Save a log intent item and a list of extents, so that we can replay
 * whatever action had to happen to the extent list and file the log done
 * item.
 */
struct xfs_defer_pending {
	const struct xfs_defer_op_type	*dfp_type;	/* function pointers */
	struct list_head		dfp_list;	/* pending items */
	void				*dfp_intent;	/* log intent item */
	void				*dfp_done;	/* log done item */
	struct list_head		dfp_work;	/* work items */
	unsigned int			dfp_count;	/* # extent items */
};
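
/*
 * Hedged sketch of a pending item's lifecycle (a simplification of the
 * logic in fs/xfs/libxfs/xfs_defer.c, not a normative description).  Only
 * the field names above are taken as given:
 *
 *	xfs_defer_add()        dfp queued on the intake list; the work item
 *	                       goes on dfp_work and dfp_count is bumped
 *	intent logged          dfp_intent set; dfp moves to the pending list
 *	work items finished    ->finish_item() records progress against the
 *	                       done item referenced by dfp_done
 *	all work complete      the intent/done pair retires the deferred op
 */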

/*
 * Header for deferred operation list.
 *
 * dop_low is used by the allocator to activate the lowspace algorithm -
 * when free space is running low the extent allocator may choose to
 * allocate an extent from an AG without leaving sufficient space for
 * a btree split when inserting the new extent.  In this case the allocator
 * will enable the lowspace algorithm which is supposed to allow further
 * allocations (such as btree splits and newroots) to allocate from
 * sequential AGs.  In order to avoid locking AGs out of order the lowspace
 * algorithm will start searching for free space from AG 0.  If the correct
 * transaction reservations have been made then this algorithm will eventually
 * find all the space it needs.
 */
enum xfs_defer_ops_type {
	XFS_DEFER_OPS_TYPE_BMAP,
	XFS_DEFER_OPS_TYPE_REFCOUNT,
	XFS_DEFER_OPS_TYPE_RMAP,
	XFS_DEFER_OPS_TYPE_FREE,
	XFS_DEFER_OPS_TYPE_AGFL_FREE,
	XFS_DEFER_OPS_TYPE_MAX,
};

#define XFS_DEFER_OPS_NR_INODES	2	/* join up to two inodes */
#define XFS_DEFER_OPS_NR_BUFS	2	/* join up to two buffers */

struct xfs_defer_ops {
	bool			dop_committed;	/* did any trans commit? */
	bool			dop_low;	/* alloc in low mode */
	struct list_head	dop_intake;	/* unlogged pending work */
	struct list_head	dop_pending;	/* logged pending work */

	/* relog these with each roll */
	struct xfs_inode	*dop_inodes[XFS_DEFER_OPS_NR_INODES];
	struct xfs_buf		*dop_bufs[XFS_DEFER_OPS_NR_BUFS];
};

void xfs_defer_add(struct xfs_defer_ops *dop, enum xfs_defer_ops_type type,
		struct list_head *h);
int xfs_defer_finish(struct xfs_trans **tp, struct xfs_defer_ops *dop);
void xfs_defer_cancel(struct xfs_defer_ops *dop);
void xfs_defer_init(struct xfs_defer_ops *dop, xfs_fsblock_t *fbp);
bool xfs_defer_has_unfinished_work(struct xfs_defer_ops *dop);
int xfs_defer_ijoin(struct xfs_defer_ops *dop, struct xfs_inode *ip);
int xfs_defer_bjoin(struct xfs_defer_ops *dop, struct xfs_buf *bp);
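
/*
 * Hedged usage sketch of the interfaces above.  The transaction *tp, the
 * locked inode *ip, and the deferred-free work item "work" (with an
 * embedded list_head named wi_list here purely for illustration) are
 * assumed to have been set up by the caller and are not real identifiers
 * from this codebase:
 *
 *	struct xfs_defer_ops	dfops;
 *	xfs_fsblock_t		firstblock;
 *	int			error;
 *
 *	xfs_defer_init(&dfops, &firstblock);
 *	xfs_defer_add(&dfops, XFS_DEFER_OPS_TYPE_FREE, &work->wi_list);
 *	error = xfs_defer_ijoin(&dfops, ip);	 relog ip across each roll
 *	if (!error)
 *		error = xfs_defer_finish(&tp, &dfops);	 may roll tp
 *	if (error)
 *		xfs_defer_cancel(&dfops);
 */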

/* Description of a deferred type. */
struct xfs_defer_op_type {
	enum xfs_defer_ops_type	type;
	unsigned int		max_items;
	void (*abort_intent)(void *);
	void *(*create_done)(struct xfs_trans *, void *, unsigned int);
	int (*finish_item)(struct xfs_trans *, struct xfs_defer_ops *,
			struct list_head *, void *, void **);
	void (*finish_cleanup)(struct xfs_trans *, void *, int);
	void (*cancel_item)(struct list_head *);
	int (*diff_items)(void *, struct list_head *, struct list_head *);
	void *(*create_intent)(struct xfs_trans *, uint);
	void (*log_item)(struct xfs_trans *, void *, struct list_head *);
};

void xfs_defer_init_op_type(const struct xfs_defer_op_type *type);
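
/*
 * Hedged sketch of how a provider might describe and register a deferred
 * op type at init time.  "xfs_example_defer_type", the handler names, and
 * the max_items value are hypothetical stand-ins; the real providers
 * (extent free, rmap, refcount, bmap) register their own op types from
 * their respective libxfs files:
 *
 *	static const struct xfs_defer_op_type xfs_example_defer_type = {
 *		.type		= XFS_DEFER_OPS_TYPE_FREE,
 *		.max_items	= 16,
 *		.diff_items	= xfs_example_diff_items,
 *		.create_intent	= xfs_example_create_intent,
 *		.abort_intent	= xfs_example_abort_intent,
 *		.log_item	= xfs_example_log_item,
 *		.create_done	= xfs_example_create_done,
 *		.finish_item	= xfs_example_finish_item,
 *		.cancel_item	= xfs_example_cancel_item,
 *	};
 *
 *	xfs_defer_init_op_type(&xfs_example_defer_type);
 */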

#endif /* __XFS_DEFER_H__ */