/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_JOURNAL_TYPES_H
#define _BCACHEFS_JOURNAL_TYPES_H

#include <linux/cache.h>
#include <linux/workqueue.h>

#include "alloc_types.h"
#include "super_types.h"
#include "fifo.h"

/* btree write buffer steals 8 bits for its own purposes: */
#define JOURNAL_SEQ_MAX		((1ULL << 56) - 1)

#define JOURNAL_STATE_BUF_BITS	2
#define JOURNAL_STATE_BUF_NR	(1U << JOURNAL_STATE_BUF_BITS)
#define JOURNAL_STATE_BUF_MASK	(JOURNAL_STATE_BUF_NR - 1)

#define JOURNAL_BUF_BITS	4
#define JOURNAL_BUF_NR		(1U << JOURNAL_BUF_BITS)
#define JOURNAL_BUF_MASK	(JOURNAL_BUF_NR - 1)

/*
 * We put JOURNAL_BUF_NR of these in struct journal; we use them for writes to
 * the journal that are being staged or in flight.
 */
struct journal_buf {
	struct closure		io;
	struct jset		*data;

	__BKEY_PADDED(key, BCH_REPLICAS_MAX);
	struct bch_devs_list	devs_written;

	struct closure_waitlist	wait;
	u64			last_seq;	/* copy of data->last_seq */
	long			expires;
	u64			flush_time;

	unsigned		buf_size;	/* size in bytes of @data */
	unsigned		sectors;	/* maximum size for current entry */
	unsigned		disk_sectors;	/* maximum size entry could have been, if
						   buf_size was bigger */
	unsigned		u64s_reserved;
	bool			noflush:1;	/* write has already been kicked off, and was noflush */
	bool			must_flush:1;	/* something wants a flush */
	bool			separate_flush:1;
	bool			need_flush_to_write_buffer:1;
	bool			write_started:1;
	bool			write_allocated:1;
	bool			write_done:1;
	u8			idx;
};

/*
 * Something that makes a journal entry dirty - i.e. a btree node that has to be
 * flushed:
 */

enum journal_pin_type {
	JOURNAL_PIN_TYPE_btree3,
	JOURNAL_PIN_TYPE_btree2,
	JOURNAL_PIN_TYPE_btree1,
	JOURNAL_PIN_TYPE_btree0,
	JOURNAL_PIN_TYPE_key_cache,
	JOURNAL_PIN_TYPE_other,
	JOURNAL_PIN_TYPE_NR,
};

struct journal_entry_pin_list {
	struct list_head		unflushed[JOURNAL_PIN_TYPE_NR];
	struct list_head		flushed[JOURNAL_PIN_TYPE_NR];
	atomic_t			count;
	struct bch_devs_list		devs;
};

struct journal;
struct journal_entry_pin;
typedef int (*journal_pin_flush_fn)(struct journal *j,
				struct journal_entry_pin *, u64);

struct journal_entry_pin {
	struct list_head		list;
	journal_pin_flush_fn		flush;
	u64				seq;
};

struct journal_res {
	bool			ref;
	u16			u64s;
	u32			offset;
	u64			seq;
};

union journal_res_state {
	struct {
		atomic64_t	counter;
	};

	struct {
		u64		v;
	};

	struct {
		u64		cur_entry_offset:22,
				idx:2,
				buf0_count:10,
				buf1_count:10,
				buf2_count:10,
				buf3_count:10;
	};
};

/* bytes: */
#define JOURNAL_ENTRY_SIZE_MIN		(64U << 10)		/* 64k */
#define JOURNAL_ENTRY_SIZE_MAX		(4U << 22)		/* 16M */

/*
 * We stash some journal state as sentinel values in cur_entry_offset:
 * note - cur_entry_offset is in units of u64s
 */
#define JOURNAL_ENTRY_OFFSET_MAX	((1U << 22) - 1)

#define JOURNAL_ENTRY_BLOCKED_VAL	(JOURNAL_ENTRY_OFFSET_MAX - 2)
#define JOURNAL_ENTRY_CLOSED_VAL	(JOURNAL_ENTRY_OFFSET_MAX - 1)
#define JOURNAL_ENTRY_ERROR_VAL		(JOURNAL_ENTRY_OFFSET_MAX)

struct journal_space {
	/* Units of 512 byte sectors: */
	unsigned	next_entry; /* How big the next journal entry can be */
	unsigned	total;
};

enum journal_space_from {
	journal_space_discarded,
	journal_space_clean_ondisk,
	journal_space_clean,
	journal_space_total,
	journal_space_nr,
};

#define JOURNAL_FLAGS()			\
	x(replay_done)			\
	x(running)			\
	x(may_skip_flush)		\
	x(need_flush_write)		\
	x(space_low)

enum journal_flags {
#define x(n)	JOURNAL_##n,
	JOURNAL_FLAGS()
#undef x
};

typedef DARRAY(u64)		darray_u64;

struct journal_bio {
	struct bch_dev		*ca;
	unsigned		buf_idx;
	u64			submit_time;

	struct bio		bio;
};

/* Embedded in struct bch_fs */
struct journal {
	/* Fastpath stuff up front: */
	struct {

	union journal_res_state reservations;
	enum bch_watermark	watermark;

	} __aligned(SMP_CACHE_BYTES);

	unsigned long		flags;

	/* Max size of current journal entry */
	unsigned		cur_entry_u64s;
	unsigned		cur_entry_sectors;

	/* Reserved space in journal entry to be used just prior to write */
	unsigned		entry_u64s_reserved;

	/*
	 * 0, or -ENOSPC if waiting on journal reclaim, or -EROFS if
	 * insufficient devices:
	 */
	int			cur_entry_error;
	unsigned		cur_entry_offset_if_blocked;

	unsigned		buf_size_want;
	/*
	 * We may queue up some things to be journalled (log messages) before
	 * the journal has actually started - stash them here:
	 */
	darray_u64		early_journal_entries;

	/*
	 * Protects journal_buf->data, when accessing without a journal
	 * reservation: for synchronization between the btree write buffer code
	 * and the journal write path:
	 */
	struct mutex		buf_lock;
	/*
	 * Array of JOURNAL_BUF_NR journal entry buffers -- one is currently
	 * open for new entries, the others may be in flight or awaiting
	 * write-out:
	 */
	struct journal_buf	buf[JOURNAL_BUF_NR];
	void			*free_buf;
	unsigned		free_buf_size;

	spinlock_t		lock;

	/* if nonzero, we may not open a new journal entry: */
	unsigned		blocked;

	/* Used when waiting because the journal was full */
	wait_queue_head_t	wait;
	struct closure_waitlist	async_wait;
	struct closure_waitlist	reclaim_flush_wait;

	struct delayed_work	write_work;
	struct workqueue_struct *wq;

	/* Sequence number of most recent journal entry (last entry in @pin) */
	atomic64_t		seq;

	u64			seq_write_started;
	/* seq, last_seq from the most recent journal entry successfully written */
	u64			seq_ondisk;
	u64			flushed_seq_ondisk;
	u64			flushing_seq;
	u64			last_seq_ondisk;
	u64			err_seq;
	u64			last_empty_seq;
	u64			oldest_seq_found_ondisk;

	/*
	 * FIFO of journal entries whose btree updates have not yet been
	 * written out.
	 *
	 * Each entry is a reference count. The position in the FIFO is the
	 * entry's sequence number relative to @seq.
	 *
	 * The journal entry itself holds a reference count, put when the
	 * journal entry is written out. Each btree node modified by the journal
	 * entry also holds a reference count, put when the btree node is
	 * written.
	 *
	 * When a reference count reaches zero, the journal entry is no longer
	 * needed. When all journal entries in the oldest journal bucket are no
	 * longer needed, the bucket can be discarded and reused.
	 */
	struct {
		u64 front, back, size, mask;
		struct journal_entry_pin_list *data;
	}			pin;

	struct journal_space	space[journal_space_nr];

	u64			replay_journal_seq;
	u64			replay_journal_seq_end;

	struct write_point	wp;
	spinlock_t		err_lock;

	struct mutex		reclaim_lock;
	/*
	 * Used for waiting until journal reclaim has freed up space in the
	 * journal:
	 */
	wait_queue_head_t	reclaim_wait;
	struct task_struct	*reclaim_thread;
	bool			reclaim_kicked;
	unsigned long		next_reclaim;
	u64			nr_direct_reclaim;
	u64			nr_background_reclaim;

	unsigned long		last_flushed;
	struct journal_entry_pin *flush_in_progress;
	bool			flush_in_progress_dropped;
	wait_queue_head_t	pin_flush_wait;

	/* protects advancing ja->discard_idx: */
	struct mutex		discard_lock;
	bool			can_discard;

	unsigned long		last_flush_write;

	u64			write_start_time;

	u64			nr_flush_writes;
	u64			nr_noflush_writes;
	u64			entry_bytes_written;

	struct bch2_time_stats *flush_write_time;
	struct bch2_time_stats *noflush_write_time;
	struct bch2_time_stats *flush_seq_time;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	res_map;
#endif
} __aligned(SMP_CACHE_BYTES);

/*
 * Embedded in struct bch_dev. First three fields refer to the array of journal
 * buckets, in bch_sb.
 */
struct journal_device {
	/*
	 * For each journal bucket, contains the max sequence number of the
	 * journal writes it contains - so we know when a bucket can be reused.
	 */
	u64			*bucket_seq;

	unsigned		sectors_free;

	/*
	 * discard_idx <= dirty_idx_ondisk <= dirty_idx <= cur_idx:
	 */
	unsigned		discard_idx;		/* Next bucket to discard */
	unsigned		dirty_idx_ondisk;
	unsigned		dirty_idx;
	unsigned		cur_idx;		/* Journal bucket we're currently writing to */
	unsigned		nr;

	u64			*buckets;

	/* Bio for journal reads/writes to this device */
	struct journal_bio	*bio[JOURNAL_BUF_NR];

	/* for bch_journal_read_device */
	struct closure		read;
	u64			highest_seq_found;
};

/*
 * journal_entry_res - reserve space in every journal entry:
 */
struct journal_entry_res {
	unsigned		u64s;
};

#endif /* _BCACHEFS_JOURNAL_TYPES_H */