1 /* SPDX-License-Identifier: GPL-2.0 */ 2 #ifndef _BCACHEFS_JOURNAL_H 3 #define _BCACHEFS_JOURNAL_H 4 5 /* 6 * THE JOURNAL: 7 * 8 * The primary purpose of the journal is to log updates (insertions) to the 9 * b-tree, to avoid having to do synchronous updates to the b-tree on disk. 10 * 11 * Without the journal, the b-tree is always internally consistent on 12 * disk - and in fact, in the earliest incarnations bcache didn't have a journal 13 * but did handle unclean shutdowns by doing all index updates synchronously 14 * (with coalescing). 15 * 16 * Updates to interior nodes still happen synchronously and without the journal 17 * (for simplicity) - this may change eventually but updates to interior nodes 18 * are rare enough it's not a huge priority. 19 * 20 * This means the journal is relatively separate from the b-tree; it consists of 21 * just a list of keys and journal replay consists of just redoing those 22 * insertions in same order that they appear in the journal. 23 * 24 * PERSISTENCE: 25 * 26 * For synchronous updates (where we're waiting on the index update to hit 27 * disk), the journal entry will be written out immediately (or as soon as 28 * possible, if the write for the previous journal entry was still in flight). 29 * 30 * Synchronous updates are specified by passing a closure (@flush_cl) to 31 * bch2_btree_insert() or bch_btree_insert_node(), which then pass that parameter 32 * down to the journalling code. That closure will wait on the journal write to 33 * complete (via closure_wait()). 34 * 35 * If the index update wasn't synchronous, the journal entry will be 36 * written out after 10 ms have elapsed, by default (the delay_ms field 37 * in struct journal). 38 * 39 * JOURNAL ENTRIES: 40 * 41 * A journal entry is variable size (struct jset), it's got a fixed length 42 * header and then a variable number of struct jset_entry entries. 43 * 44 * Journal entries are identified by monotonically increasing 64 bit sequence 45 * numbers - jset->seq; other places in the code refer to this sequence number. 46 * 47 * A jset_entry entry contains one or more bkeys (which is what gets inserted 48 * into the b-tree). We need a container to indicate which b-tree the key is 49 * for; also, the roots of the various b-trees are stored in jset_entry entries 50 * (one for each b-tree) - this lets us add new b-tree types without changing 51 * the on disk format. 52 * 53 * We also keep some things in the journal header that are logically part of the 54 * superblock - all the things that are frequently updated. This is for future 55 * bcache on raw flash support; the superblock (which will become another 56 * journal) can't be moved or wear leveled, so it contains just enough 57 * information to find the main journal, and the superblock only has to be 58 * rewritten when we want to move/wear level the main journal. 59 * 60 * JOURNAL LAYOUT ON DISK: 61 * 62 * The journal is written to a ringbuffer of buckets (which is kept in the 63 * superblock); the individual buckets are not necessarily contiguous on disk 64 * which means that journal entries are not allowed to span buckets, but also 65 * that we can resize the journal at runtime if desired (unimplemented). 66 * 67 * The journal buckets exist in the same pool as all the other buckets that are 68 * managed by the allocator and garbage collection - garbage collection marks 69 * the journal buckets as metadata buckets. 70 * 71 * OPEN/DIRTY JOURNAL ENTRIES: 72 * 73 * Open/dirty journal entries are journal entries that contain b-tree updates 74 * that have not yet been written out to the b-tree on disk. We have to track 75 * which journal entries are dirty, and we also have to avoid wrapping around 76 * the journal and overwriting old but still dirty journal entries with new 77 * journal entries. 78 * 79 * On disk, this is represented with the "last_seq" field of struct jset; 80 * last_seq is the first sequence number that journal replay has to replay. 81 * 82 * To avoid overwriting dirty journal entries on disk, we keep a mapping (in 83 * journal_device->seq) of for each journal bucket, the highest sequence number 84 * any journal entry it contains. Then, by comparing that against last_seq we 85 * can determine whether that journal bucket contains dirty journal entries or 86 * not. 87 * 88 * To track which journal entries are dirty, we maintain a fifo of refcounts 89 * (where each entry corresponds to a specific sequence number) - when a ref 90 * goes to 0, that journal entry is no longer dirty. 91 * 92 * Journalling of index updates is done at the same time as the b-tree itself is 93 * being modified (see btree_insert_key()); when we add the key to the journal 94 * the pending b-tree write takes a ref on the journal entry the key was added 95 * to. If a pending b-tree write would need to take refs on multiple dirty 96 * journal entries, it only keeps the ref on the oldest one (since a newer 97 * journal entry will still be replayed if an older entry was dirty). 98 * 99 * JOURNAL FILLING UP: 100 * 101 * There are two ways the journal could fill up; either we could run out of 102 * space to write to, or we could have too many open journal entries and run out 103 * of room in the fifo of refcounts. Since those refcounts are decremented 104 * without any locking we can't safely resize that fifo, so we handle it the 105 * same way. 106 * 107 * If the journal fills up, we start flushing dirty btree nodes until we can 108 * allocate space for a journal write again - preferentially flushing btree 109 * nodes that are pinning the oldest journal entries first. 110 */ 111 112 #include <linux/hash.h> 113 114 #include "journal_types.h" 115 116 struct bch_fs; 117 118 static inline void journal_wake(struct journal *j) 119 { 120 wake_up(&j->wait); 121 closure_wake_up(&j->async_wait); 122 closure_wake_up(&j->preres_wait); 123 } 124 125 static inline struct journal_buf *journal_cur_buf(struct journal *j) 126 { 127 return j->buf + j->reservations.idx; 128 } 129 130 /* Sequence number of oldest dirty journal entry */ 131 132 static inline u64 journal_last_seq(struct journal *j) 133 { 134 return j->pin.front; 135 } 136 137 static inline u64 journal_cur_seq(struct journal *j) 138 { 139 EBUG_ON(j->pin.back - 1 != atomic64_read(&j->seq)); 140 141 return j->pin.back - 1; 142 } 143 144 static inline u64 journal_last_unwritten_seq(struct journal *j) 145 { 146 return j->seq_ondisk + 1; 147 } 148 149 static inline int journal_state_count(union journal_res_state s, int idx) 150 { 151 switch (idx) { 152 case 0: return s.buf0_count; 153 case 1: return s.buf1_count; 154 case 2: return s.buf2_count; 155 case 3: return s.buf3_count; 156 } 157 BUG(); 158 } 159 160 static inline void journal_state_inc(union journal_res_state *s) 161 { 162 s->buf0_count += s->idx == 0; 163 s->buf1_count += s->idx == 1; 164 s->buf2_count += s->idx == 2; 165 s->buf3_count += s->idx == 3; 166 } 167 168 /* 169 * Amount of space that will be taken up by some keys in the journal (i.e. 170 * including the jset header) 171 */ 172 static inline unsigned jset_u64s(unsigned u64s) 173 { 174 return u64s + sizeof(struct jset_entry) / sizeof(u64); 175 } 176 177 static inline int journal_entry_overhead(struct journal *j) 178 { 179 return sizeof(struct jset) / sizeof(u64) + j->entry_u64s_reserved; 180 } 181 182 static inline struct jset_entry * 183 bch2_journal_add_entry_noreservation(struct journal_buf *buf, size_t u64s) 184 { 185 struct jset *jset = buf->data; 186 struct jset_entry *entry = vstruct_idx(jset, le32_to_cpu(jset->u64s)); 187 188 memset(entry, 0, sizeof(*entry)); 189 entry->u64s = cpu_to_le16(u64s); 190 191 le32_add_cpu(&jset->u64s, jset_u64s(u64s)); 192 193 return entry; 194 } 195 196 static inline struct jset_entry * 197 journal_res_entry(struct journal *j, struct journal_res *res) 198 { 199 return vstruct_idx(j->buf[res->idx].data, res->offset); 200 } 201 202 static inline unsigned journal_entry_init(struct jset_entry *entry, unsigned type, 203 enum btree_id id, unsigned level, 204 unsigned u64s) 205 { 206 entry->u64s = cpu_to_le16(u64s); 207 entry->btree_id = id; 208 entry->level = level; 209 entry->type = type; 210 entry->pad[0] = 0; 211 entry->pad[1] = 0; 212 entry->pad[2] = 0; 213 return jset_u64s(u64s); 214 } 215 216 static inline unsigned journal_entry_set(struct jset_entry *entry, unsigned type, 217 enum btree_id id, unsigned level, 218 const void *data, unsigned u64s) 219 { 220 unsigned ret = journal_entry_init(entry, type, id, level, u64s); 221 222 memcpy_u64s_small(entry->_data, data, u64s); 223 return ret; 224 } 225 226 static inline struct jset_entry * 227 bch2_journal_add_entry(struct journal *j, struct journal_res *res, 228 unsigned type, enum btree_id id, 229 unsigned level, unsigned u64s) 230 { 231 struct jset_entry *entry = journal_res_entry(j, res); 232 unsigned actual = journal_entry_init(entry, type, id, level, u64s); 233 234 EBUG_ON(!res->ref); 235 EBUG_ON(actual > res->u64s); 236 237 res->offset += actual; 238 res->u64s -= actual; 239 return entry; 240 } 241 242 static inline bool journal_entry_empty(struct jset *j) 243 { 244 struct jset_entry *i; 245 246 if (j->seq != j->last_seq) 247 return false; 248 249 vstruct_for_each(j, i) 250 if (i->type == BCH_JSET_ENTRY_btree_keys && i->u64s) 251 return false; 252 return true; 253 } 254 255 /* 256 * Drop reference on a buffer index and return true if the count has hit zero. 257 */ 258 static inline union journal_res_state journal_state_buf_put(struct journal *j, unsigned idx) 259 { 260 union journal_res_state s; 261 262 s.v = atomic64_sub_return(((union journal_res_state) { 263 .buf0_count = idx == 0, 264 .buf1_count = idx == 1, 265 .buf2_count = idx == 2, 266 .buf3_count = idx == 3, 267 }).v, &j->reservations.counter); 268 return s; 269 } 270 271 void bch2_journal_buf_put_final(struct journal *, u64, bool); 272 273 static inline void __bch2_journal_buf_put(struct journal *j, unsigned idx, u64 seq) 274 { 275 union journal_res_state s; 276 277 s = journal_state_buf_put(j, idx); 278 if (!journal_state_count(s, idx)) 279 bch2_journal_buf_put_final(j, seq, idx == s.unwritten_idx); 280 } 281 282 static inline void bch2_journal_buf_put(struct journal *j, unsigned idx, u64 seq) 283 { 284 union journal_res_state s; 285 286 s = journal_state_buf_put(j, idx); 287 if (!journal_state_count(s, idx)) { 288 spin_lock(&j->lock); 289 bch2_journal_buf_put_final(j, seq, idx == s.unwritten_idx); 290 spin_unlock(&j->lock); 291 } 292 } 293 294 /* 295 * This function releases the journal write structure so other threads can 296 * then proceed to add their keys as well. 297 */ 298 static inline void bch2_journal_res_put(struct journal *j, 299 struct journal_res *res) 300 { 301 if (!res->ref) 302 return; 303 304 lock_release(&j->res_map, _THIS_IP_); 305 306 while (res->u64s) 307 bch2_journal_add_entry(j, res, 308 BCH_JSET_ENTRY_btree_keys, 309 0, 0, 0); 310 311 bch2_journal_buf_put(j, res->idx, res->seq); 312 313 res->ref = 0; 314 } 315 316 int bch2_journal_res_get_slowpath(struct journal *, struct journal_res *, 317 unsigned); 318 319 /* First bits for BCH_WATERMARK: */ 320 enum journal_res_flags { 321 __JOURNAL_RES_GET_NONBLOCK = BCH_WATERMARK_BITS, 322 __JOURNAL_RES_GET_CHECK, 323 }; 324 325 #define JOURNAL_RES_GET_NONBLOCK (1 << __JOURNAL_RES_GET_NONBLOCK) 326 #define JOURNAL_RES_GET_CHECK (1 << __JOURNAL_RES_GET_CHECK) 327 328 static inline int journal_res_get_fast(struct journal *j, 329 struct journal_res *res, 330 unsigned flags) 331 { 332 union journal_res_state old, new; 333 u64 v = atomic64_read(&j->reservations.counter); 334 335 do { 336 old.v = new.v = v; 337 338 /* 339 * Check if there is still room in the current journal 340 * entry: 341 */ 342 if (new.cur_entry_offset + res->u64s > j->cur_entry_u64s) 343 return 0; 344 345 EBUG_ON(!journal_state_count(new, new.idx)); 346 347 if ((flags & BCH_WATERMARK_MASK) < j->watermark) 348 return 0; 349 350 new.cur_entry_offset += res->u64s; 351 journal_state_inc(&new); 352 353 /* 354 * If the refcount would overflow, we have to wait: 355 * XXX - tracepoint this: 356 */ 357 if (!journal_state_count(new, new.idx)) 358 return 0; 359 360 if (flags & JOURNAL_RES_GET_CHECK) 361 return 1; 362 } while ((v = atomic64_cmpxchg(&j->reservations.counter, 363 old.v, new.v)) != old.v); 364 365 res->ref = true; 366 res->idx = old.idx; 367 res->offset = old.cur_entry_offset; 368 res->seq = le64_to_cpu(j->buf[old.idx].data->seq); 369 return 1; 370 } 371 372 static inline int bch2_journal_res_get(struct journal *j, struct journal_res *res, 373 unsigned u64s, unsigned flags) 374 { 375 int ret; 376 377 EBUG_ON(res->ref); 378 EBUG_ON(!test_bit(JOURNAL_STARTED, &j->flags)); 379 380 res->u64s = u64s; 381 382 if (journal_res_get_fast(j, res, flags)) 383 goto out; 384 385 ret = bch2_journal_res_get_slowpath(j, res, flags); 386 if (ret) 387 return ret; 388 out: 389 if (!(flags & JOURNAL_RES_GET_CHECK)) { 390 lock_acquire_shared(&j->res_map, 0, 391 (flags & JOURNAL_RES_GET_NONBLOCK) != 0, 392 NULL, _THIS_IP_); 393 EBUG_ON(!res->ref); 394 } 395 return 0; 396 } 397 398 /* journal_preres: */ 399 400 static inline void journal_set_watermark(struct journal *j) 401 { 402 union journal_preres_state s = READ_ONCE(j->prereserved); 403 unsigned watermark = BCH_WATERMARK_stripe; 404 405 if (fifo_free(&j->pin) < j->pin.size / 4) 406 watermark = max_t(unsigned, watermark, BCH_WATERMARK_copygc); 407 if (fifo_free(&j->pin) < j->pin.size / 8) 408 watermark = max_t(unsigned, watermark, BCH_WATERMARK_reclaim); 409 410 if (s.reserved > s.remaining) 411 watermark = max_t(unsigned, watermark, BCH_WATERMARK_copygc); 412 if (!s.remaining) 413 watermark = max_t(unsigned, watermark, BCH_WATERMARK_reclaim); 414 415 if (watermark == j->watermark) 416 return; 417 418 swap(watermark, j->watermark); 419 if (watermark > j->watermark) 420 journal_wake(j); 421 } 422 423 static inline void bch2_journal_preres_put(struct journal *j, 424 struct journal_preres *res) 425 { 426 union journal_preres_state s = { .reserved = res->u64s }; 427 428 if (!res->u64s) 429 return; 430 431 s.v = atomic64_sub_return(s.v, &j->prereserved.counter); 432 res->u64s = 0; 433 434 if (unlikely(s.waiting)) { 435 clear_bit(ilog2((((union journal_preres_state) { .waiting = 1 }).v)), 436 (unsigned long *) &j->prereserved.v); 437 closure_wake_up(&j->preres_wait); 438 } 439 440 if (s.reserved <= s.remaining && j->watermark) 441 journal_set_watermark(j); 442 } 443 444 int __bch2_journal_preres_get(struct journal *, 445 struct journal_preres *, unsigned, unsigned); 446 447 static inline int bch2_journal_preres_get_fast(struct journal *j, 448 struct journal_preres *res, 449 unsigned new_u64s, 450 unsigned flags, 451 bool set_waiting) 452 { 453 int d = new_u64s - res->u64s; 454 union journal_preres_state old, new; 455 u64 v = atomic64_read(&j->prereserved.counter); 456 enum bch_watermark watermark = flags & BCH_WATERMARK_MASK; 457 int ret; 458 459 do { 460 old.v = new.v = v; 461 ret = 0; 462 463 if (watermark == BCH_WATERMARK_reclaim || 464 new.reserved + d < new.remaining) { 465 new.reserved += d; 466 ret = 1; 467 } else if (set_waiting && !new.waiting) 468 new.waiting = true; 469 else 470 return 0; 471 } while ((v = atomic64_cmpxchg(&j->prereserved.counter, 472 old.v, new.v)) != old.v); 473 474 if (ret) 475 res->u64s += d; 476 return ret; 477 } 478 479 static inline int bch2_journal_preres_get(struct journal *j, 480 struct journal_preres *res, 481 unsigned new_u64s, 482 unsigned flags) 483 { 484 if (new_u64s <= res->u64s) 485 return 0; 486 487 if (bch2_journal_preres_get_fast(j, res, new_u64s, flags, false)) 488 return 0; 489 490 if (flags & JOURNAL_RES_GET_NONBLOCK) 491 return -BCH_ERR_journal_preres_get_blocked; 492 493 return __bch2_journal_preres_get(j, res, new_u64s, flags); 494 } 495 496 /* journal_entry_res: */ 497 498 void bch2_journal_entry_res_resize(struct journal *, 499 struct journal_entry_res *, 500 unsigned); 501 502 int bch2_journal_flush_seq_async(struct journal *, u64, struct closure *); 503 void bch2_journal_flush_async(struct journal *, struct closure *); 504 505 int bch2_journal_flush_seq(struct journal *, u64); 506 int bch2_journal_flush(struct journal *); 507 bool bch2_journal_noflush_seq(struct journal *, u64); 508 int bch2_journal_meta(struct journal *); 509 510 void bch2_journal_halt(struct journal *); 511 512 static inline int bch2_journal_error(struct journal *j) 513 { 514 return j->reservations.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL 515 ? -EIO : 0; 516 } 517 518 struct bch_dev; 519 520 static inline void bch2_journal_set_replay_done(struct journal *j) 521 { 522 BUG_ON(!test_bit(JOURNAL_STARTED, &j->flags)); 523 set_bit(JOURNAL_REPLAY_DONE, &j->flags); 524 } 525 526 void bch2_journal_unblock(struct journal *); 527 void bch2_journal_block(struct journal *); 528 529 void __bch2_journal_debug_to_text(struct printbuf *, struct journal *); 530 void bch2_journal_debug_to_text(struct printbuf *, struct journal *); 531 void bch2_journal_pins_to_text(struct printbuf *, struct journal *); 532 bool bch2_journal_seq_pins_to_text(struct printbuf *, struct journal *, u64 *); 533 534 int bch2_set_nr_journal_buckets(struct bch_fs *, struct bch_dev *, 535 unsigned nr); 536 int bch2_dev_journal_alloc(struct bch_dev *); 537 int bch2_fs_journal_alloc(struct bch_fs *); 538 539 void bch2_dev_journal_stop(struct journal *, struct bch_dev *); 540 541 void bch2_fs_journal_stop(struct journal *); 542 int bch2_fs_journal_start(struct journal *, u64); 543 544 void bch2_dev_journal_exit(struct bch_dev *); 545 int bch2_dev_journal_init(struct bch_dev *, struct bch_sb *); 546 void bch2_fs_journal_exit(struct journal *); 547 int bch2_fs_journal_init(struct journal *); 548 549 #endif /* _BCACHEFS_JOURNAL_H */ 550