// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef	__XFS_LOG_H__
#define __XFS_LOG_H__

struct xfs_cil_ctx;

struct xfs_log_vec {
	struct xfs_log_vec	*lv_next;	/* next lv in build list */
	int			lv_niovecs;	/* number of iovecs in lv */
	struct xfs_log_iovec	*lv_iovecp;	/* iovec array */
	struct xfs_log_item	*lv_item;	/* owner */
	char			*lv_buf;	/* formatted buffer */
	int			lv_bytes;	/* accounted space in buffer */
	int			lv_buf_len;	/* aligned size of buffer */
	int			lv_size;	/* size of allocated lv */
};
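
/*
 * Illustrative sketch (not guaranteed by this header alone): the log vector,
 * its iovec array and its formatting buffer are normally carved out of a
 * single allocation of lv_size bytes:
 *
 *	| struct xfs_log_vec | iovec array | lv_buf ...             |
 *	|<----------------------- lv_size ------------------------->|
 *
 * lv_bytes accounts for the space consumed in lv_buf; the buffer overrun
 * ASSERT in xlog_finish_iovec() below relies on lv_buf lying inside the
 * lv_size-byte allocation that starts at the xfs_log_vec itself.
 */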

#define XFS_LOG_VEC_ORDERED	(-1)

/*
 * Calculate the log iovec length for a given user buffer length. Intended to be
 * used by ->iop_size implementations when sizing buffers of arbitrary
 * alignments.
 */
static inline int
xlog_calc_iovec_len(int len)
{
	return roundup(len, sizeof(uint32_t));
}
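
/*
 * Worked example (illustrative): xlog_calc_iovec_len(5) == 8 and
 * xlog_calc_iovec_len(8) == 8, i.e. lengths are rounded up to the next
 * uint32_t (4 byte) boundary so every iovec payload stays 32 bit aligned.
 */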

void *xlog_prepare_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
		uint type);

static inline void
xlog_finish_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec *vec,
		int data_len)
{
	struct xlog_op_header	*oph = vec->i_addr;
	int			len;

	/*
	 * Always round up the length to the correct alignment so callers don't
	 * need to know anything about this log vec layout requirement. This
	 * means we have to zero the area that the data to be written does not
	 * cover. This is complicated by the fact that the payload region is
	 * offset into the logvec region by the opheader that tracks the
	 * payload.
	 */
	len = xlog_calc_iovec_len(data_len);
	if (len - data_len != 0) {
		char	*buf = vec->i_addr + sizeof(struct xlog_op_header);

		memset(buf + data_len, 0, len - data_len);
	}

	/*
	 * The opheader tracks aligned payload length, whilst the logvec tracks
	 * the overall region length.
	 */
	oph->oh_len = cpu_to_be32(len);

	len += sizeof(struct xlog_op_header);
	lv->lv_buf_len += len;
	lv->lv_bytes += len;
	vec->i_len = len;

	/* Catch buffer overruns */
	ASSERT((void *)lv->lv_buf + lv->lv_bytes <= (void *)lv + lv->lv_size);
}

/*
 * Copy the amount of data requested by the caller into a new log iovec.
 */
static inline void *
xlog_copy_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
		uint type, void *data, int len)
{
	void *buf;

	buf = xlog_prepare_iovec(lv, vecp, type);
	memcpy(buf, data, len);
	xlog_finish_iovec(lv, *vecp, len);
	return buf;
}

static inline void *
xlog_copy_from_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
		const struct xfs_log_iovec *src)
{
	return xlog_copy_iovec(lv, vecp, src->i_type, src->i_addr, src->i_len);
}
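
/*
 * Typical usage sketch (hypothetical item; FOO_ITEM and
 * XLOG_REG_TYPE_FOO_FORMAT are illustrative names, not part of this header):
 * an ->iop_format implementation copies its log format structure into the
 * log vector handed to it, e.g.:
 *
 *	static void
 *	xfs_foo_item_format(
 *		struct xfs_log_item	*lip,
 *		struct xfs_log_vec	*lv)
 *	{
 *		struct xfs_foo_log_item	*foop = FOO_ITEM(lip);
 *		struct xfs_log_iovec	*vecp = NULL;
 *
 *		xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_FOO_FORMAT,
 *				&foop->foo_format,
 *				sizeof(struct xfs_foo_log_format));
 *	}
 *
 * Items that want to format data in place instead call xlog_prepare_iovec()
 * and xlog_finish_iovec() directly around their own formatting code.
 */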

/*
 * By comparing each component separately we avoid endianness issues when
 * treating two 32 bit numbers as one 64 bit number.
 */
static inline xfs_lsn_t	_lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
{
	if (CYCLE_LSN(lsn1) != CYCLE_LSN(lsn2))
		return (CYCLE_LSN(lsn1) < CYCLE_LSN(lsn2)) ? -999 : 999;

	if (BLOCK_LSN(lsn1) != BLOCK_LSN(lsn2))
		return (BLOCK_LSN(lsn1) < BLOCK_LSN(lsn2)) ? -999 : 999;

	return 0;
}

#define	XFS_LSN_CMP(x, y) _lsn_cmp(x, y)
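
/*
 * Illustrative example: only the sign of the result is meaningful. A caller
 * that wants to know whether item_lsn is at or behind tail_lsn checks
 *
 *	XFS_LSN_CMP(item_lsn, tail_lsn) <= 0
 *
 * The -999/999 return values carry no meaning beyond being negative and
 * positive.
 */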

/*
 * Flags to xfs_log_force()
 *
 *	XFS_LOG_SYNC:	Synchronously force the in-core log to disk
 */
#define XFS_LOG_SYNC		0x1
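
/*
 * Illustrative example: flush all pending in-core log buffers and wait for
 * the writes to complete with
 *
 *	error = xfs_log_force(mp, XFS_LOG_SYNC);
 *
 * whereas xfs_log_force(mp, 0) starts the flush without waiting for it.
 */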

/* Log manager interfaces */
struct xfs_mount;
struct xlog_in_core;
struct xlog_ticket;
struct xfs_log_item;
struct xfs_item_ops;
struct xfs_trans;
struct xlog;

int	  xfs_log_force(struct xfs_mount *mp, uint flags);
int	  xfs_log_force_seq(struct xfs_mount *mp, xfs_csn_t seq, uint flags,
		int *log_forced);
int	  xfs_log_mount(struct xfs_mount	*mp,
			struct xfs_buftarg	*log_target,
			xfs_daddr_t		start_block,
			int			num_bblocks);
int	  xfs_log_mount_finish(struct xfs_mount *mp);
void	xfs_log_mount_cancel(struct xfs_mount *mp);
xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
xfs_lsn_t xlog_assign_tail_lsn_locked(struct xfs_mount *mp);
void	xfs_log_space_wake(struct xfs_mount *mp);
int	xfs_log_reserve(struct xfs_mount *mp, int length, int count,
			struct xlog_ticket **ticket, bool permanent);
int	xfs_log_regrant(struct xfs_mount *mp, struct xlog_ticket *tic);
void	xfs_log_unmount(struct xfs_mount *mp);
bool	xfs_log_writable(struct xfs_mount *mp);

struct xlog_ticket *xfs_log_ticket_get(struct xlog_ticket *ticket);
void	  xfs_log_ticket_put(struct xlog_ticket *ticket);

void	xlog_cil_process_committed(struct list_head *list);
bool	xfs_log_item_in_current_chkpt(struct xfs_log_item *lip);

void	xfs_log_work_queue(struct xfs_mount *mp);
int	xfs_log_quiesce(struct xfs_mount *mp);
void	xfs_log_clean(struct xfs_mount *mp);
bool	xfs_log_check_lsn(struct xfs_mount *mp, xfs_lsn_t lsn);

xfs_lsn_t xlog_grant_push_threshold(struct xlog *log, int need_bytes);
bool	  xlog_force_shutdown(struct xlog *log, uint32_t shutdown_flags);

void xlog_use_incompat_feat(struct xlog *log);
void xlog_drop_incompat_feat(struct xlog *log);
int xfs_attr_use_log_assist(struct xfs_mount *mp);

#endif	/* __XFS_LOG_H__ */