// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "eytzinger.h"
#include "journal.h"
#include "journal_seq_blacklist.h"
#include "super-io.h"

/*
 * journal_seq_blacklist machinery:
 *
 * To guarantee order of btree updates after a crash, we need to detect when a
 * btree node entry (bset) is newer than the newest journal entry that was
 * successfully written, and ignore it - effectively ignoring any btree updates
 * that didn't make it into the journal.
 *
 * If we didn't do this, we might have two btree nodes, a and b, both with
 * updates that weren't written to the journal yet: if b was updated after a,
 * but b was flushed and not a - oops; on recovery we'll find that the updates
 * to b happened, but not the updates to a that happened before it.
 *
 * Ignoring bsets that are newer than the newest journal entry is always safe,
 * because everything they contain will also have been journalled - and must
 * still be present in the journal on disk until a journal entry has been
 * written _after_ that bset was written.
 *
 * To accomplish this, bsets record the newest journal sequence number they
 * contain updates for; then, on startup, the btree code queries the journal
 * code to ask "Is this sequence number newer than the newest journal entry? If
 * so, ignore it."
 *
 * When this happens, we must blacklist that journal sequence number: the
 * journal must not write any entries with that sequence number, and it must
 * record that it was blacklisted so that a) on recovery we don't think we have
 * missing journal entries and b) so that the btree code continues to ignore
 * that bset, until that btree node is rewritten.
 */
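/*
 * Illustrative sketch (hypothetical helper, not part of the real btree read
 * path): once the lookup table built below exists, a reader of a btree node
 * bset could decide whether to keep or drop it along these lines.  Passing
 * dirty=true records that a live btree node still references the blacklisted
 * range, so bch2_blacklist_entries_gc() won't drop the entry until that node
 * is rewritten.
 */
static bool __maybe_unused example_keep_bset(struct bch_fs *c, u64 bset_journal_seq)
{
	return !bch2_journal_seq_is_blacklisted(c, bset_journal_seq, true);
}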
static unsigned sb_blacklist_u64s(unsigned nr)
{
	struct bch_sb_field_journal_seq_blacklist *bl;

	return (sizeof(*bl) + sizeof(bl->start[0]) * nr) / sizeof(u64);
}

int bch2_journal_seq_blacklist_add(struct bch_fs *c, u64 start, u64 end)
{
	struct bch_sb_field_journal_seq_blacklist *bl;
	unsigned i = 0, nr;
	int ret = 0;

	mutex_lock(&c->sb_lock);
	bl = bch2_sb_field_get(c->disk_sb.sb, journal_seq_blacklist);
	nr = blacklist_nr_entries(bl);

	while (i < nr) {
		struct journal_seq_blacklist_entry *e =
			bl->start + i;

		if (end < le64_to_cpu(e->start))
			break;

		if (start > le64_to_cpu(e->end)) {
			i++;
			continue;
		}

		/*
		 * Entry is contiguous or overlapping with new entry: merge it
		 * with new entry, and delete:
		 */

		start = min(start, le64_to_cpu(e->start));
		end = max(end, le64_to_cpu(e->end));
		array_remove_item(bl->start, nr, i);
	}

	bl = bch2_sb_field_resize(&c->disk_sb, journal_seq_blacklist,
				  sb_blacklist_u64s(nr + 1));
	if (!bl) {
		ret = -BCH_ERR_ENOSPC_sb_journal_seq_blacklist;
		goto out;
	}

	array_insert_item(bl->start, nr, i, ((struct journal_seq_blacklist_entry) {
		.start	= cpu_to_le64(start),
		.end	= cpu_to_le64(end),
	}));
	c->disk_sb.sb->features[0] |= cpu_to_le64(1ULL << BCH_FEATURE_journal_seq_blacklist_v3);

	ret = bch2_write_super(c);
out:
	mutex_unlock(&c->sb_lock);

	return ret ?: bch2_blacklist_table_initialize(c);
}

static int journal_seq_blacklist_table_cmp(const void *_l, const void *_r)
{
	const struct journal_seq_blacklist_table_entry *l = _l;
	const struct journal_seq_blacklist_table_entry *r = _r;

	return cmp_int(l->start, r->start);
}

bool bch2_journal_seq_is_blacklisted(struct bch_fs *c, u64 seq,
				     bool dirty)
{
	struct journal_seq_blacklist_table *t = c->journal_seq_blacklist_table;
	struct journal_seq_blacklist_table_entry search = { .start = seq };
	int idx;

	if (!t)
		return false;

	idx = eytzinger0_find_le(t->entries, t->nr,
				 sizeof(t->entries[0]),
				 journal_seq_blacklist_table_cmp,
				 &search);
	if (idx < 0)
		return false;

	BUG_ON(t->entries[idx].start > seq);

	if (seq >= t->entries[idx].end)
		return false;

	if (dirty)
		t->entries[idx].dirty = true;
	return true;
}

int bch2_blacklist_table_initialize(struct bch_fs *c)
{
	struct bch_sb_field_journal_seq_blacklist *bl =
		bch2_sb_field_get(c->disk_sb.sb, journal_seq_blacklist);
	struct journal_seq_blacklist_table *t;
	unsigned i, nr = blacklist_nr_entries(bl);

	if (!bl)
		return 0;

	t = kzalloc(struct_size(t, entries, nr), GFP_KERNEL);
	if (!t)
		return -BCH_ERR_ENOMEM_blacklist_table_init;

	t->nr = nr;

	for (i = 0; i < nr; i++) {
		t->entries[i].start	= le64_to_cpu(bl->start[i].start);
		t->entries[i].end	= le64_to_cpu(bl->start[i].end);
	}

	eytzinger0_sort(t->entries,
			t->nr,
			sizeof(t->entries[0]),
			journal_seq_blacklist_table_cmp,
			NULL);

	kfree(c->journal_seq_blacklist_table);
	c->journal_seq_blacklist_table = t;
	return 0;
}

static int bch2_sb_journal_seq_blacklist_validate(struct bch_sb *sb, struct bch_sb_field *f,
						  enum bch_validate_flags flags, struct printbuf *err)
{
	struct bch_sb_field_journal_seq_blacklist *bl =
		field_to_type(f, journal_seq_blacklist);
	unsigned i, nr = blacklist_nr_entries(bl);

	for (i = 0; i < nr; i++) {
		struct journal_seq_blacklist_entry *e = bl->start + i;

		if (le64_to_cpu(e->start) >=
		    le64_to_cpu(e->end)) {
			prt_printf(err, "entry %u start >= end (%llu >= %llu)",
				   i, le64_to_cpu(e->start), le64_to_cpu(e->end));
			return -BCH_ERR_invalid_sb_journal_seq_blacklist;
		}

		if (i + 1 < nr &&
		    le64_to_cpu(e[0].end) >
		    le64_to_cpu(e[1].start)) {
			prt_printf(err, "entry %u out of order with next entry (%llu > %llu)",
				   i + 1, le64_to_cpu(e[0].end), le64_to_cpu(e[1].start));
			return -BCH_ERR_invalid_sb_journal_seq_blacklist;
		}
	}

	return 0;
}

static void bch2_sb_journal_seq_blacklist_to_text(struct printbuf *out,
						  struct bch_sb *sb,
						  struct bch_sb_field *f)
{
	struct bch_sb_field_journal_seq_blacklist *bl =
		field_to_type(f, journal_seq_blacklist);
	struct journal_seq_blacklist_entry *i;
	unsigned nr = blacklist_nr_entries(bl);

	for (i = bl->start; i < bl->start + nr; i++) {
		if (i != bl->start)
			prt_printf(out, " ");

		prt_printf(out, "%llu-%llu",
			   le64_to_cpu(i->start),
			   le64_to_cpu(i->end));
	}
	prt_newline(out);
}

const struct bch_sb_field_ops bch_sb_field_ops_journal_seq_blacklist = {
	.validate	= bch2_sb_journal_seq_blacklist_validate,
	.to_text	= bch2_sb_journal_seq_blacklist_to_text
};
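/*
 * Illustrative sketch (hypothetical, not called from anywhere): assuming no
 * other entries exist, blacklisting 10-20 and then 15-30 leaves a single
 * merged 10-30 entry, because bch2_journal_seq_blacklist_add() folds
 * contiguous and overlapping ranges together.  Since the table lookup above
 * treats an entry's end as exclusive, seq 25 then reports as blacklisted but
 * seq 30 does not:
 */
static bool __maybe_unused example_blacklist_merge(struct bch_fs *c)
{
	int ret = bch2_journal_seq_blacklist_add(c, 10, 20) ?:
		  bch2_journal_seq_blacklist_add(c, 15, 30);

	return !ret &&
		bch2_journal_seq_is_blacklisted(c, 25, false) &&
		!bch2_journal_seq_is_blacklisted(c, 30, false);
}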
bool bch2_blacklist_entries_gc(struct bch_fs *c)
{
	struct journal_seq_blacklist_entry *src, *dst;

	struct bch_sb_field_journal_seq_blacklist *bl =
		bch2_sb_field_get(c->disk_sb.sb, journal_seq_blacklist);
	if (!bl)
		return false;

	unsigned nr = blacklist_nr_entries(bl);
	dst = bl->start;

	struct journal_seq_blacklist_table *t = c->journal_seq_blacklist_table;
	BUG_ON(nr != t->nr);

	unsigned i;
	for (src = bl->start, i = t->nr == 0 ? 0 : eytzinger0_first(t->nr);
	     src < bl->start + nr;
	     src++, i = eytzinger0_next(i, nr)) {
		BUG_ON(t->entries[i].start	!= le64_to_cpu(src->start));
		BUG_ON(t->entries[i].end	!= le64_to_cpu(src->end));

		if (t->entries[i].dirty || t->entries[i].end >= c->journal.oldest_seq_found_ondisk)
			*dst++ = *src;
	}

	unsigned new_nr = dst - bl->start;
	if (new_nr == nr)
		return false;

	bch_verbose(c, "nr blacklist entries was %u, now %u", nr, new_nr);

	bl = bch2_sb_field_resize(&c->disk_sb, journal_seq_blacklist,
				  new_nr ? sb_blacklist_u64s(new_nr) : 0);
	BUG_ON(new_nr && !bl);
	return true;
}
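/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * bch2_blacklist_entries_gc() only rewrites the superblock field in memory
 * and returns true when it dropped entries, so a caller holding sb_lock
 * would still need to write the superblock to persist the smaller list:
 */
static int __maybe_unused example_blacklist_gc_and_write(struct bch_fs *c)
{
	int ret = 0;

	mutex_lock(&c->sb_lock);
	if (bch2_blacklist_entries_gc(c))
		ret = bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	return ret;
}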