--- journal.c (6268dc2c4703aabfb0b35681be709acf4c2826c6)
+++ journal.c (6f10f7d1b02b1bbc305f88d7696445dd38b13881)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * bcache journalling code, for btree insertions
4 *
5 * Copyright 2012 Google, Inc.
6 */
7
8 #include "bcache.h"

--- 18 unchanged lines hidden ---

27
28 static void journal_read_endio(struct bio *bio)
29 {
30 struct closure *cl = bio->bi_private;
31 closure_put(cl);
32 }
33
34 static int journal_read_bucket(struct cache *ca, struct list_head *list,
- 35 unsigned bucket_index)
+ 35 unsigned int bucket_index)
36 {
37 struct journal_device *ja = &ca->journal;
38 struct bio *bio = &ja->bio;
39
40 struct journal_replay *i;
41 struct jset *j, *data = ca->set->journal.w[0].data;
42 struct closure cl;
- 43 unsigned len, left, offset = 0;
+ 43 unsigned int len, left, offset = 0;
44 int ret = 0;
45 sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);
46
47 closure_init_stack(&cl);
48
49 pr_debug("reading %u", bucket_index);
50
51 while (offset < ca->sb.bucket_size) {
52 reread: left = ca->sb.bucket_size - offset;
- 53 len = min_t(unsigned, left, PAGE_SECTORS << JSET_BITS);
+ 53 len = min_t(unsigned int, left, PAGE_SECTORS << JSET_BITS);
54
55 bio_reset(bio);
56 bio->bi_iter.bi_sector = bucket + offset;
57 bio_set_dev(bio, ca->bdev);
58 bio->bi_iter.bi_size = len << 9;
59
60 bio->bi_end_io = journal_read_endio;
61 bio->bi_private = &cl;

--- 87 unchanged lines hidden ---

149 int ret = journal_read_bucket(ca, list, b); \
150 __set_bit(b, bitmap); \
151 if (ret < 0) \
152 return ret; \
153 ret; \
154 })
155
156 struct cache *ca;
- 157 unsigned iter;
+ 157 unsigned int iter;
158
159 for_each_cache(ca, c, iter) {
160 struct journal_device *ja = &ca->journal;
161 DECLARE_BITMAP(bitmap, SB_JOURNAL_BUCKETS);
- 162 unsigned i, l, r, m;
+ 162 unsigned int i, l, r, m;
163 uint64_t seq;
164
165 bitmap_zero(bitmap, SB_JOURNAL_BUCKETS);
166 pr_debug("%u journal buckets", ca->sb.njournal_buckets);
167
168 /*
169 * Read journal buckets ordered by golden ratio hash to quickly
170 * find a sequence of buckets with valid journal entries

--- 128 unchanged lines hidden ---

299 i->pin = &fifo_front(&j->pin);
300 atomic_set(i->pin, 1);
301 }
302
303 for (k = i->j.start;
304 k < bset_bkey_last(&i->j);
305 k = bkey_next(k))
306 if (!__bch_extent_invalid(c, k)) {
- 307 unsigned j;
+ 307 unsigned int j;
308
309 for (j = 0; j < KEY_PTRS(k); j++)
310 if (ptr_available(c, k, j))
311 atomic_inc(&PTR_BUCKET(c, k, j)->pin);
312
313 bch_initial_mark_key(c, 0, k);
314 }
315 }

--- 171 unchanged lines hidden ---

487 }
488 }
489
490 static void journal_reclaim(struct cache_set *c)
491 {
492 struct bkey *k = &c->journal.key;
493 struct cache *ca;
494 uint64_t last_seq;
- 495 unsigned iter, n = 0;
+ 495 unsigned int iter, n = 0;
496 atomic_t p __maybe_unused;
497
498 atomic_long_inc(&c->reclaim);
499
500 while (!atomic_read(&fifo_front(&c->journal.pin)))
501 fifo_pop(&c->journal.pin, p);
502
503 last_seq = last_seq(&c->journal);

--- 17 unchanged lines hidden ---

521
522 /*
523 * Allocate:
524 * XXX: Sort by free journal space
525 */
526
527 for_each_cache(ca, c, iter) {
528 struct journal_device *ja = &ca->journal;
- 529 unsigned next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
+ 529 unsigned int next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
530
531 /* No space available on this device */
532 if (next == ja->discard_idx)
533 continue;
534
535 ja->cur_idx = next;
536 k->ptr[n++] = MAKE_PTR(0,
537 bucket_to_sector(c, ca->sb.d[ja->cur_idx]),

--- 66 unchanged lines hidden ---

604
605 static void journal_write_unlocked(struct closure *cl)
606 __releases(c->journal.lock)
607 {
608 struct cache_set *c = container_of(cl, struct cache_set, journal.io);
609 struct cache *ca;
610 struct journal_write *w = c->journal.cur;
611 struct bkey *k = &c->journal.key;
- 612 unsigned i, sectors = set_blocks(w->data, block_bytes(c)) *
+ 612 unsigned int i, sectors = set_blocks(w->data, block_bytes(c)) *
613 c->sb.block_size;
614
615 struct bio *bio;
616 struct bio_list list;
617 bio_list_init(&list);
618
619 if (!w->need_write) {
620 closure_return_with_destructor(cl, journal_write_unlock);

--- 79 unchanged lines hidden ---

700 c->journal.io_in_flight = 1;
701 closure_call(cl, journal_write_unlocked, NULL, &c->cl);
702 } else {
703 spin_unlock(&c->journal.lock);
704 }
705 }
706
707 static struct journal_write *journal_wait_for_write(struct cache_set *c,
- 708 unsigned nkeys)
+ 708 unsigned int nkeys)
709 __acquires(&c->journal.lock)
710 {
711 size_t sectors;
712 struct closure cl;
713 bool wait = false;
714
715 closure_init_stack(&cl);
716

--- 137 unchanged lines hidden ---
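
Every hunk in this diff is the same mechanical style cleanup: bare "unsigned" is spelled out as "unsigned int", both in declarations and as the type argument to min_t(), with no functional change (the kernel's checkpatch.pl warns on bare "unsigned"). A minimal standalone sketch of the pattern, using hypothetical names (sectors_left_old/sectors_left_new) rather than the journal.c code itself:

#include <stdio.h>

/*
 * Illustrative sketch only: hypothetical names, not bcache code.
 * The two functions behave identically; only the spelling of the
 * unsigned type differs, exactly as in the hunks above.
 */

/* old spelling: bare "unsigned" */
static unsigned sectors_left_old(unsigned bucket_size, unsigned offset)
{
	return bucket_size - offset;
}

/* new spelling: explicit "unsigned int" */
static unsigned int sectors_left_new(unsigned int bucket_size,
				     unsigned int offset)
{
	return bucket_size - offset;
}

int main(void)
{
	/* same result either way; the change is purely stylistic */
	printf("%u %u\n", sectors_left_old(1024, 16),
	       sectors_left_new(1024, 16));
	return 0;
}

In C, "unsigned" and "unsigned int" name the same type, so these hunks do not change the generated code; the explicit form is simply the spelling checkpatch.pl prefers.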