// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "buckets_waiting_for_journal.h"
#include <linux/hash.h>
#include <linux/random.h>

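/*
 * Track buckets whose most recent update hasn't yet hit the journal on disk:
 * such a bucket must not be handed back out until the journal sequence number
 * that made the update durable has been flushed.
 *
 * Entries live in a small open-addressed hash table with several hash seeds
 * (cuckoo style): a key may sit in any of its candidate slots, and a slot
 * whose journal_seq has already been flushed counts as free.
 *
 * Rough usage sketch (the actual call sites are elsewhere in bcachefs, e.g.
 * the allocation and bucket invalidate paths):
 *
 *	if (!bch2_bucket_needs_journal_commit(b, flushed_seq, dev, bucket))
 *		... bucket is safe to reuse ...
 *
 *	bch2_set_bucket_needs_journal_commit(b, flushed_seq, dev, bucket, seq);
 */
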
static inline struct bucket_hashed *
bucket_hash(struct buckets_waiting_for_journal_table *t,
	    unsigned hash_seed_idx, u64 dev_bucket)
{
	return t->d + hash_64(dev_bucket ^ t->hash_seeds[hash_seed_idx], t->bits);
}

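/*
 * (Re)initialize a table: pick fresh random hash seeds and zero all entries.
 * A zeroed entry has journal_seq == 0, which is always <= flushed_seq, so the
 * insert path treats it as a free slot.
 */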
static void bucket_table_init(struct buckets_waiting_for_journal_table *t, size_t bits)
{
	unsigned i;

	t->bits = bits;
	for (i = 0; i < ARRAY_SIZE(t->hash_seeds); i++)
		get_random_bytes(&t->hash_seeds[i], sizeof(t->hash_seeds[i]));
	memset(t->d, 0, sizeof(t->d[0]) << t->bits);
}

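/*
 * Returns true if @bucket on device @dev was last updated at a journal
 * sequence number that hasn't been flushed yet, i.e. it can't be reused.
 * The lookup just probes each of the key's candidate slots.
 */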
bool bch2_bucket_needs_journal_commit(struct buckets_waiting_for_journal *b,
				      u64 flushed_seq,
				      unsigned dev, u64 bucket)
{
	struct buckets_waiting_for_journal_table *t;
	u64 dev_bucket = (u64) dev << 56 | bucket;
	bool ret = false;
	unsigned i;

	mutex_lock(&b->lock);
	t = b->t;

	for (i = 0; i < ARRAY_SIZE(t->hash_seeds); i++) {
		struct bucket_hashed *h = bucket_hash(t, i, dev_bucket);

		if (h->dev_bucket == dev_bucket) {
			ret = h->journal_seq > flushed_seq;
			break;
		}
	}

	mutex_unlock(&b->lock);

	return ret;
}

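/*
 * Cuckoo-style insert: try each of the key's candidate slots, treating any
 * slot whose journal_seq has already been flushed as empty.  If every
 * candidate is live, evict one (skipping the slot the key being placed was
 * just evicted from, to avoid ping-ponging), reinsert the victim, and give up
 * after a bounded number of evictions - returning false tells the caller to
 * grow the table.
 */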
static bool bucket_table_insert(struct buckets_waiting_for_journal_table *t,
				struct bucket_hashed *new,
				u64 flushed_seq)
{
	struct bucket_hashed *last_evicted = NULL;
	unsigned tries, i;

	for (tries = 0; tries < 10; tries++) {
		struct bucket_hashed *old, *victim = NULL;

		for (i = 0; i < ARRAY_SIZE(t->hash_seeds); i++) {
			old = bucket_hash(t, i, new->dev_bucket);

			if (old->dev_bucket == new->dev_bucket ||
			    old->journal_seq <= flushed_seq) {
				*old = *new;
				return true;
			}

			if (last_evicted != old)
				victim = old;
		}

		/* hashed to same slot 3 times: */
		if (!victim)
			break;

		/* Failed to find an empty slot: */
		swap(*new, *victim);
		last_evicted = victim;
	}

	return false;
}

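/*
 * Record that @bucket on @dev must not be reused until @journal_seq has been
 * flushed.  The fast path is a plain insert; if that fails the table is
 * reallocated at roughly three times the number of still-unflushed entries
 * and everything live is rehashed into it.
 */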
int bch2_set_bucket_needs_journal_commit(struct buckets_waiting_for_journal *b,
					 u64 flushed_seq,
					 unsigned dev, u64 bucket,
					 u64 journal_seq)
{
	struct buckets_waiting_for_journal_table *t, *n;
	struct bucket_hashed tmp, new = {
		.dev_bucket	= (u64) dev << 56 | bucket,
		.journal_seq	= journal_seq,
	};
	size_t i, size, new_bits, nr_elements = 1, nr_rehashes = 0, nr_rehashes_this_size = 0;
	int ret = 0;

	mutex_lock(&b->lock);

	if (likely(bucket_table_insert(b->t, &new, flushed_seq)))
		goto out;

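	/*
	 * Slow path: insertion failed, so grow.  Count the entries still
	 * waiting on the journal in order to size the replacement table with
	 * headroom (~3x the live element count, rounded up to a power of two).
	 */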
	t = b->t;
	size = 1UL << t->bits;
	for (i = 0; i < size; i++)
		nr_elements += t->d[i].journal_seq > flushed_seq;

	new_bits = ilog2(roundup_pow_of_two(nr_elements * 3));
realloc:
	n = kvmalloc(sizeof(*n) + (sizeof(n->d[0]) << new_bits), GFP_KERNEL);
	if (!n) {
		ret = -BCH_ERR_ENOMEM_buckets_waiting_for_journal_set;
		goto out;
	}

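	/*
	 * Rehash everything live into the new table, using its fresh seeds.
	 * If that fails three times at this size, stop reseeding and grow the
	 * table instead.
	 */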
retry_rehash:
	if (nr_rehashes_this_size == 3) {
		new_bits++;
		nr_rehashes_this_size = 0;
		kvfree(n);
		goto realloc;
	}

	nr_rehashes++;
	nr_rehashes_this_size++;

	bucket_table_init(n, new_bits);

	tmp = new;
	BUG_ON(!bucket_table_insert(n, &tmp, flushed_seq));

	for (i = 0; i < 1UL << t->bits; i++) {
		if (t->d[i].journal_seq <= flushed_seq)
			continue;

		tmp = t->d[i];
		if (!bucket_table_insert(n, &tmp, flushed_seq))
			goto retry_rehash;
	}

	b->t = n;
	kvfree(t);

	pr_debug("took %zu rehashes, table at %zu/%lu elements",
		 nr_rehashes, nr_elements, 1UL << b->t->bits);
out:
	mutex_unlock(&b->lock);

	return ret;
}

void bch2_fs_buckets_waiting_for_journal_exit(struct bch_fs *c)
{
	struct buckets_waiting_for_journal *b = &c->buckets_waiting_for_journal;

	kvfree(b->t);
}

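/* Start small (1 << 3 == 8 entries); the table grows on demand. */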
#define INITIAL_TABLE_BITS		3

int bch2_fs_buckets_waiting_for_journal_init(struct bch_fs *c)
{
	struct buckets_waiting_for_journal *b = &c->buckets_waiting_for_journal;

	mutex_init(&b->lock);

	b->t = kvmalloc(sizeof(*b->t) +
			(sizeof(b->t->d[0]) << INITIAL_TABLE_BITS), GFP_KERNEL);
	if (!b->t)
		return -BCH_ERR_ENOMEM_buckets_waiting_for_journal_init;

	bucket_table_init(b->t, INITIAL_TABLE_BITS);
	return 0;
}