// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_LOG_H__
#define __XFS_LOG_H__

struct xfs_cil_ctx;

struct xfs_log_vec {
	struct list_head	lv_list;	/* CIL lv chain ptrs */
	uint32_t		lv_order_id;	/* chain ordering info */
	int			lv_niovecs;	/* number of iovecs in lv */
	struct xfs_log_iovec	*lv_iovecp;	/* iovec array */
	struct xfs_log_item	*lv_item;	/* owner */
	char			*lv_buf;	/* formatted buffer */
	int			lv_bytes;	/* accounted space in buffer */
	int			lv_buf_len;	/* aligned size of buffer */
	int			lv_size;	/* size of allocated lv */
};

#define XFS_LOG_VEC_ORDERED	(-1)

/*
 * Calculate the log iovec length for a given user buffer length. Intended to
 * be used by ->iop_size implementations when sizing buffers of arbitrary
 * alignments.
 */
static inline int
xlog_calc_iovec_len(int len)
{
	return roundup(len, sizeof(uint32_t));
}

void *xlog_prepare_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
		uint type);

static inline void
xlog_finish_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec *vec,
		int data_len)
{
	struct xlog_op_header	*oph = vec->i_addr;
	int			len;

	/*
	 * Always round up the length to the correct alignment so callers don't
	 * need to know anything about this log vec layout requirement. This
	 * means we have to zero the area the data to be written does not
	 * cover. This is complicated by the fact that the payload region is
	 * offset into the logvec region by the opheader that tracks the
	 * payload.
	 */
	len = xlog_calc_iovec_len(data_len);
	if (len - data_len != 0) {
		char	*buf = vec->i_addr + sizeof(struct xlog_op_header);

		memset(buf + data_len, 0, len - data_len);
	}

	/*
	 * The opheader tracks the aligned payload length, whilst the logvec
	 * tracks the overall region length.
	 */
	oph->oh_len = cpu_to_be32(len);

	len += sizeof(struct xlog_op_header);
	lv->lv_buf_len += len;
	lv->lv_bytes += len;
	vec->i_len = len;

	/* Catch buffer overruns */
	ASSERT((void *)lv->lv_buf + lv->lv_bytes <= (void *)lv + lv->lv_size);
}

/*
 * Copy the amount of data requested by the caller into a new log iovec.
 */
static inline void *
xlog_copy_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
		uint type, void *data, int len)
{
	void *buf;

	buf = xlog_prepare_iovec(lv, vecp, type);
	memcpy(buf, data, len);
	xlog_finish_iovec(lv, *vecp, len);
	return buf;
}

static inline void *
xlog_copy_from_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
		const struct xfs_log_iovec *src)
{
	return xlog_copy_iovec(lv, vecp, src->i_type, src->i_addr, src->i_len);
}
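
/*
 * Example usage (illustrative sketch only, not part of the XFS API): a
 * hypothetical formatting helper showing the intended calling pattern for
 * the iovec helpers above. A fixed-size header is copied in one step with
 * xlog_copy_iovec(), while a variable-length payload is built in place with
 * xlog_prepare_iovec()/xlog_finish_iovec(). The function name and arguments
 * are made up for the example; @hdr_type and @data_type would be
 * XLOG_REG_TYPE_* values from xfs_log_format.h.
 */
static inline void
xlog_example_format(struct xfs_log_vec *lv, uint hdr_type, void *hdr,
		int hdr_len, uint data_type, void *data, int data_len)
{
	struct xfs_log_iovec	*vecp = NULL;
	void			*buf;

	/* Fixed-size region: copy the data and close the iovec in one call. */
	xlog_copy_iovec(lv, &vecp, hdr_type, hdr, hdr_len);

	/*
	 * Variable-length region: reserve the next iovec, fill it in place,
	 * then close it with the number of payload bytes actually written.
	 * xlog_finish_iovec() rounds the length up to the required alignment
	 * and zeroes the padding for us.
	 */
	buf = xlog_prepare_iovec(lv, &vecp, data_type);
	memcpy(buf, data, data_len);
	xlog_finish_iovec(lv, vecp, data_len);
}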

/*
 * By comparing each component, we don't have to worry about extra endian
 * issues in treating two 32 bit numbers as one 64 bit number.
 */
static inline xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
{
	if (CYCLE_LSN(lsn1) != CYCLE_LSN(lsn2))
		return (CYCLE_LSN(lsn1) < CYCLE_LSN(lsn2)) ? -999 : 999;

	if (BLOCK_LSN(lsn1) != BLOCK_LSN(lsn2))
		return (BLOCK_LSN(lsn1) < BLOCK_LSN(lsn2)) ? -999 : 999;

	return 0;
}

#define XFS_LSN_CMP(x,y)	_lsn_cmp(x,y)

/*
 * Flags to xfs_log_force()
 *
 *	XFS_LOG_SYNC:	Synchronous force in-core log to disk
 */
#define XFS_LOG_SYNC		0x1

/* Log manager interfaces */
struct xfs_mount;
struct xlog_in_core;
struct xlog_ticket;
struct xfs_log_item;
struct xfs_item_ops;
struct xfs_trans;
struct xlog;

int	xfs_log_force(struct xfs_mount *mp, uint flags);
int	xfs_log_force_seq(struct xfs_mount *mp, xfs_csn_t seq, uint flags,
		int *log_forced);
int	xfs_log_mount(struct xfs_mount *mp,
		struct xfs_buftarg *log_target,
		xfs_daddr_t start_block,
		int num_bblocks);
int	xfs_log_mount_finish(struct xfs_mount *mp);
void	xfs_log_mount_cancel(struct xfs_mount *);
xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
xfs_lsn_t xlog_assign_tail_lsn_locked(struct xfs_mount *mp);
void	xfs_log_space_wake(struct xfs_mount *mp);
int	xfs_log_reserve(struct xfs_mount *mp, int length, int count,
		struct xlog_ticket **ticket, bool permanent);
int	xfs_log_regrant(struct xfs_mount *mp, struct xlog_ticket *tic);
void	xfs_log_unmount(struct xfs_mount *mp);
bool	xfs_log_writable(struct xfs_mount *mp);

struct xlog_ticket *xfs_log_ticket_get(struct xlog_ticket *ticket);
void	xfs_log_ticket_put(struct xlog_ticket *ticket);

void	xlog_cil_process_committed(struct list_head *list);
bool	xfs_log_item_in_current_chkpt(struct xfs_log_item *lip);

void	xfs_log_work_queue(struct xfs_mount *mp);
int	xfs_log_quiesce(struct xfs_mount *mp);
void	xfs_log_clean(struct xfs_mount *mp);
bool	xfs_log_check_lsn(struct xfs_mount *, xfs_lsn_t);

xfs_lsn_t xlog_grant_push_threshold(struct xlog *log, int need_bytes);
bool	xlog_force_shutdown(struct xlog *log, uint32_t shutdown_flags);

void	xlog_use_incompat_feat(struct xlog *log);
void	xlog_drop_incompat_feat(struct xlog *log);
int	xfs_attr_use_log_assist(struct xfs_mount *mp);

#endif	/* __XFS_LOG_H__ */
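
/*
 * Example (illustrative only): comparing two LSNs with XFS_LSN_CMP() and
 * forcing the in-core log synchronously. "mp", "lsn" and "threshold" are
 * assumed to come from the caller's context:
 *
 *	if (XFS_LSN_CMP(lsn, threshold) <= 0)
 *		error = xfs_log_force(mp, XFS_LOG_SYNC);
 *
 * XFS_LSN_CMP() returns a negative value, zero or a positive value as the
 * first LSN is behind, equal to or ahead of the second, comparing the cycle
 * number first and the block number second.
 */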