xref: /linux/drivers/md/bcache/movinggc.c (revision e5c86679d5e864947a52fb31e45a425dea3e7fa9)
/*
 * Moving/copying garbage collector
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"

#include <trace/events/bcache.h>

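/*
 * Per-key state for one move: the closure driving the read/write
 * pipeline, the keybuf entry being moved, the insert op that rewrites
 * the key at its new location, and the bio used for the data transfer
 * (its inline bio_vecs are allocated along with the moving_io).
 */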
struct moving_io {
	struct closure		cl;
	struct keybuf_key	*w;
	struct data_insert_op	op;
	struct bbio		bio;
};

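/*
 * Keybuf predicate: a key is a candidate for moving if any of its
 * pointers lands in a bucket that the last garbage collection pass
 * flagged with GC_MOVE.
 */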
static bool moving_pred(struct keybuf *buf, struct bkey *k)
{
	struct cache_set *c = container_of(buf, struct cache_set,
					   moving_gc_keys);
	unsigned i;

	for (i = 0; i < KEY_PTRS(k); i++)
		if (ptr_available(c, k, i) &&
		    GC_MOVE(PTR_BUCKET(c, k, i)))
			return true;

	return false;
}

/* Moving GC - IO loop */

static void moving_io_destructor(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	kfree(io);
}

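/*
 * Final step of a move: free the data pages, drop the key from
 * moving_gc_keys, release the in-flight slot, and free the moving_io
 * via its destructor.
 */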
static void write_moving_finish(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	struct bio *bio = &io->bio.bio;

	bio_free_pages(bio);

	if (io->op.replace_collision)
		trace_bcache_gc_copy_collision(&io->w->key);

	bch_keybuf_del(&io->op.c->moving_gc_keys, io->w);

	up(&io->op.c->moving_in_flight);

	closure_return_with_destructor(cl, moving_io_destructor);
}

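/*
 * Read completion: record any I/O error, or bail out with -EINTR if a
 * clean key's pointer went stale while the read was in flight, so the
 * write phase is skipped for data that no longer needs copying.
 */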
static void read_moving_endio(struct bio *bio)
{
	struct bbio *b = container_of(bio, struct bbio, bio);
	struct moving_io *io = container_of(bio->bi_private,
					    struct moving_io, cl);

	if (bio->bi_error)
		io->op.error = bio->bi_error;
	else if (!KEY_DIRTY(&b->key) &&
		 ptr_stale(io->op.c, &b->key, 0)) {
		io->op.error = -EINTR;
	}

	bch_bbio_endio(io->op.c, bio, bio->bi_error, "reading data to move");
}

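/*
 * (Re)initialize the embedded bio for a key-sized transfer, using the
 * inline bio_vecs allocated with the moving_io, and set it to idle I/O
 * priority so moving GC defers to foreground I/O.
 */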
static void moving_init(struct moving_io *io)
{
	struct bio *bio = &io->bio.bio;

	bio_init(bio, bio->bi_inline_vecs,
		 DIV_ROUND_UP(KEY_SIZE(&io->w->key), PAGE_SECTORS));
	bio_get(bio);
	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));

	bio->bi_iter.bi_size	= KEY_SIZE(&io->w->key) << 9;
	bio->bi_private		= &io->cl;
	bch_bio_map(bio, NULL);
}

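/*
 * Second half of the pipeline: if the read succeeded, reinitialize the
 * bio and insert the data at its new location, using replace_key so
 * the insert is dropped (and counted as a collision) if the original
 * key changed while the move was in progress.
 */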
static void write_moving(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	struct data_insert_op *op = &io->op;

	if (!op->error) {
		moving_init(io);

		io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key);
		op->write_prio		= 1;
		op->bio			= &io->bio.bio;

		op->writeback		= KEY_DIRTY(&io->w->key);
		op->csum		= KEY_CSUM(&io->w->key);

		bkey_copy(&op->replace_key, &io->w->key);
		op->replace		= true;

		closure_call(&op->cl, bch_data_insert, NULL, cl);
	}

	continue_at(cl, write_moving_finish, op->wq);
}

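/*
 * First half of the pipeline: submit the read of the data being moved,
 * then continue to write_moving() on the moving GC workqueue.
 */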
static void read_moving_submit(struct closure *cl)
{
	struct moving_io *io = container_of(cl, struct moving_io, cl);
	struct bio *bio = &io->bio.bio;

	bch_submit_bbio(bio, io->op.c, &io->w->key, 0);

	continue_at(cl, write_moving, io->op.wq);
}

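/*
 * Main copying loop: scan moving_gc_keys for keys in buckets marked
 * GC_MOVE and, for each one, allocate a moving_io, read the data and
 * rewrite it elsewhere.  Concurrency is bounded by the moving_in_flight
 * semaphore; the on-stack closure waits for all outstanding moves
 * before returning.
 */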
static void read_moving(struct cache_set *c)
{
	struct keybuf_key *w;
	struct moving_io *io;
	struct bio *bio;
	struct closure cl;

	closure_init_stack(&cl);

	/* XXX: if we error, background writeback could stall indefinitely */

	while (!test_bit(CACHE_SET_STOPPING, &c->flags)) {
		w = bch_keybuf_next_rescan(c, &c->moving_gc_keys,
					   &MAX_KEY, moving_pred);
		if (!w)
			break;

		if (ptr_stale(c, &w->key, 0)) {
			bch_keybuf_del(&c->moving_gc_keys, w);
			continue;
		}

		io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec)
			     * DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
			     GFP_KERNEL);
		if (!io)
			goto err;

		w->private	= io;
		io->w		= w;
		io->op.inode	= KEY_INODE(&w->key);
		io->op.c	= c;
		io->op.wq	= c->moving_gc_wq;

		moving_init(io);
		bio = &io->bio.bio;

		bio_set_op_attrs(bio, REQ_OP_READ, 0);
		bio->bi_end_io	= read_moving_endio;

		if (bio_alloc_pages(bio, GFP_KERNEL))
			goto err;

		trace_bcache_gc_copy(&w->key);

		down(&c->moving_in_flight);
		closure_call(&io->cl, read_moving_submit, NULL, &cl);
	}

	if (0) {
err:		if (!IS_ERR_OR_NULL(w->private))
			kfree(w->private);

		bch_keybuf_del(&c->moving_gc_keys, w);
	}

	closure_sync(&cl);
}

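/*
 * bucket_cmp() orders buckets by GC_SECTORS_USED so the heap tracks
 * the buckets that are cheapest to evacuate; bucket_heap_top() returns
 * the utilization of the bucket at the root (0 if the heap is empty).
 */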
static bool bucket_cmp(struct bucket *l, struct bucket *r)
{
	return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
}

static unsigned bucket_heap_top(struct cache *ca)
{
	struct bucket *b;
	return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
}

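/*
 * Pick which buckets to evacuate: collect the partially used, unpinned
 * data buckets with the fewest live sectors, pop candidates back off
 * the heap until the data left to move fits in the RESERVE_MOVINGGC
 * allocation reserve, mark the survivors GC_MOVE, then kick off
 * read_moving() to copy their data.
 */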
void bch_moving_gc(struct cache_set *c)
{
	struct cache *ca;
	struct bucket *b;
	unsigned i;

	if (!c->copy_gc_enabled)
		return;

	mutex_lock(&c->bucket_lock);

	for_each_cache(ca, c, i) {
		unsigned sectors_to_move = 0;
		unsigned reserve_sectors = ca->sb.bucket_size *
			fifo_used(&ca->free[RESERVE_MOVINGGC]);

		ca->heap.used = 0;

		for_each_bucket(b, ca) {
			if (GC_MARK(b) == GC_MARK_METADATA ||
			    !GC_SECTORS_USED(b) ||
			    GC_SECTORS_USED(b) == ca->sb.bucket_size ||
			    atomic_read(&b->pin))
				continue;

			if (!heap_full(&ca->heap)) {
				sectors_to_move += GC_SECTORS_USED(b);
				heap_add(&ca->heap, b, bucket_cmp);
			} else if (bucket_cmp(b, heap_peek(&ca->heap))) {
				sectors_to_move -= bucket_heap_top(ca);
				sectors_to_move += GC_SECTORS_USED(b);

				ca->heap.data[0] = b;
				heap_sift(&ca->heap, 0, bucket_cmp);
			}
		}

		while (sectors_to_move > reserve_sectors) {
			heap_pop(&ca->heap, b, bucket_cmp);
			sectors_to_move -= GC_SECTORS_USED(b);
		}

		while (heap_pop(&ca->heap, b, bucket_cmp))
			SET_GC_MOVE(b, 1);
	}

	mutex_unlock(&c->bucket_lock);

	c->moving_gc_keys.last_scanned = ZERO_KEY;

	read_moving(c);
}

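/*
 * Cache-set init: set up the keybuf used to track keys being moved and
 * cap the number of in-flight moves at 64.
 */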
void bch_moving_init_cache_set(struct cache_set *c)
{
	bch_keybuf_init(&c->moving_gc_keys);
	sema_init(&c->moving_in_flight, 64);
}