xref: /linux/drivers/md/dm-io.c (revision c537b994505099b7197e7d3125b942ecbcc51eb6)
/*
 * Copyright (C) 2003 Sistina Software
 *
 * This file is released under the GPL.
 */

#include "dm-io.h"

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

static struct bio_set *_bios;

/* FIXME: can we shrink this ? */
struct io {
	unsigned long error;
	atomic_t count;
	struct task_struct *sleeper;
	io_notify_fn callback;
	void *context;
};
/*
 * io contexts are only dynamically allocated for asynchronous
 * io.  Since async io is likely to be the majority of io, we'll
 * have the same number of io contexts as buffer heads! (FIXME:
 * must reduce this).
 */
static unsigned _num_ios;
static mempool_t *_io_pool;

static unsigned int pages_to_ios(unsigned int pages)
{
	return 4 * pages;	/* too many ? */
}

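/*
 * Create, resize or destroy the io mempool (and the private bio_set
 * that goes with it), depending on whether a pool already exists and
 * on whether new_ios is zero.
 */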
static int resize_pool(unsigned int new_ios)
{
	int r = 0;

	if (_io_pool) {
		if (new_ios == 0) {
			/* free off the pool */
			mempool_destroy(_io_pool);
			_io_pool = NULL;
			bioset_free(_bios);

		} else {
			/* resize the pool */
			r = mempool_resize(_io_pool, new_ios, GFP_KERNEL);
		}

	} else {
		/* create new pool */
		_io_pool = mempool_create_kmalloc_pool(new_ios,
						       sizeof(struct io));
		if (!_io_pool)
			return -ENOMEM;

		_bios = bioset_create(16, 16, 4);
		if (!_bios) {
			mempool_destroy(_io_pool);
			_io_pool = NULL;
			return -ENOMEM;
		}
	}

	if (!r)
		_num_ios = new_ios;

	return r;
}

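/*
 * Clients reserve and release io contexts in units of pages; each page
 * of io is assumed to need up to four io contexts (see pages_to_ios()).
 */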
int dm_io_get(unsigned int num_pages)
{
	return resize_pool(_num_ios + pages_to_ios(num_pages));
}

void dm_io_put(unsigned int num_pages)
{
	resize_pool(_num_ios - pages_to_ios(num_pages));
}

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * In order to save a memory allocation we store this in the last
 * bvec, which we know is unused (blech).
 * XXX This is ugly and can OOPS with some configs... find another way.
 *---------------------------------------------------------------*/
static inline void bio_set_region(struct bio *bio, unsigned region)
{
	bio->bi_io_vec[bio->bi_max_vecs].bv_len = region;
}

static inline unsigned bio_get_region(struct bio *bio)
{
	return bio->bi_io_vec[bio->bi_max_vecs].bv_len;
}

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error);

	if (atomic_dec_and_test(&io->count)) {
		if (io->sleeper)
			wake_up_process(io->sleeper);

		else {
			int r = io->error;
			io_notify_fn fn = io->callback;
			void *context = io->context;

			mempool_free(io, _io_pool);
			fn(r, context);
		}
	}
}

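/*
 * Completion handler for every bio we submit: wait until the whole bio
 * has finished, zero-fill failed reads, then credit the enclosing io
 * and release the bio (undoing the bi_max_vecs trick first).
 */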
static int endio(struct bio *bio, unsigned int done, int error)
{
	struct io *io = (struct io *) bio->bi_private;

	/* keep going until we've finished */
	if (bio->bi_size)
		return 1;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	dec_count(io, bio_get_region(bio), error);
	bio->bi_max_vecs++;
	bio_put(bio);

	return 0;
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;
};

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len;
	*offset = bvec->bv_offset;
}

static void bvec_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	dp->context_ptr = bvec + 1;
}

static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
	dp->get_page = bvec_get_page;
	dp->next_page = bvec_next_page;
	dp->context_ptr = bvec;
}

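/*
 * Functions for getting the pages from a vmalloc'd buffer.
 */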
static void vm_get_page(struct dpages *dp,
		 struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

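/* Bios allocated from our private bio_set must be returned to it. */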
static void dm_bio_destructor(struct bio *bio)
{
	bio_free(bio, _bios);
}

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned int region, struct io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;

	while (remaining) {
		/*
		 * Allocate a suitably sized bio: we add an extra
		 * bvec for bio_get/set_region() and decrement bi_max_vecs
		 * to hide it from bio_add_page().
		 */
		num_bvecs = (remaining / (PAGE_SIZE >> SECTOR_SHIFT)) + 2;
		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, _bios);
		bio->bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		bio->bi_private = io;
		bio->bi_destructor = dm_bio_destructor;
		bio->bi_max_vecs--;
		bio_set_region(bio, region);

		/*
		 * Try and add as many pages as possible.
		 */
		while (remaining) {
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	}
}

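/*
 * Issue the io for each non-empty region, then drop the reference that
 * kept the io alive while bios were still being dispatched.
 */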
static void dispatch_io(int rw, unsigned int num_regions,
			struct io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	if (sync)
		rw |= (1 << BIO_RW_SYNC);

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count)
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

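/*
 * Dispatch the io with a stack-allocated io context and sleep until
 * every bio has completed.  Returns -EINTR if a signal arrives while
 * bios are still in flight.
 */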
static int sync_io(unsigned int num_regions, struct io_region *where,
	    int rw, struct dpages *dp, unsigned long *error_bits)
{
	struct io io;

	if (num_regions > 1 && rw != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	io.error = 0;
	atomic_set(&io.count, 1); /* see dispatch_io() */
	io.sleeper = current;

	dispatch_io(rw, num_regions, where, dp, &io, 1);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&io.count) || signal_pending(current))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	if (atomic_read(&io.count))
		return -EINTR;

	*error_bits = io.error;
	return io.error ? -EIO : 0;
}

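/*
 * Dispatch the io with a mempool-allocated io context; dec_count()
 * frees the context and calls the caller's notify function once the
 * last bio has completed.
 */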
static int async_io(unsigned int num_regions, struct io_region *where, int rw,
	     struct dpages *dp, io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && rw != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(_io_pool, GFP_NOIO);
	io->error = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = NULL;
	io->callback = fn;
	io->context = context;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}

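/*
 * Exported entry points: each pair wraps sync_io()/async_io() with one
 * of the three dpages initialisers (page list, bvec or vmalloc'd
 * buffer).
 *
 * Hypothetical usage sketch (illustration only, not from this file):
 * synchronously read the first 8 sectors of a device into a vmalloc'd
 * buffer.
 *
 *	struct io_region where = {
 *		.bdev   = bdev,
 *		.sector = 0,
 *		.count  = 8,
 *	};
 *	unsigned long error_bits;
 *	int r = dm_io_sync_vm(1, &where, READ, buffer, &error_bits);
 */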
int dm_io_sync(unsigned int num_regions, struct io_region *where, int rw,
	       struct page_list *pl, unsigned int offset,
	       unsigned long *error_bits)
{
	struct dpages dp;
	list_dp_init(&dp, pl, offset);
	return sync_io(num_regions, where, rw, &dp, error_bits);
}

int dm_io_sync_bvec(unsigned int num_regions, struct io_region *where, int rw,
		    struct bio_vec *bvec, unsigned long *error_bits)
{
	struct dpages dp;
	bvec_dp_init(&dp, bvec);
	return sync_io(num_regions, where, rw, &dp, error_bits);
}

int dm_io_sync_vm(unsigned int num_regions, struct io_region *where, int rw,
		  void *data, unsigned long *error_bits)
{
	struct dpages dp;
	vm_dp_init(&dp, data);
	return sync_io(num_regions, where, rw, &dp, error_bits);
}

int dm_io_async(unsigned int num_regions, struct io_region *where, int rw,
		struct page_list *pl, unsigned int offset,
		io_notify_fn fn, void *context)
{
	struct dpages dp;
	list_dp_init(&dp, pl, offset);
	return async_io(num_regions, where, rw, &dp, fn, context);
}

int dm_io_async_bvec(unsigned int num_regions, struct io_region *where, int rw,
		     struct bio_vec *bvec, io_notify_fn fn, void *context)
{
	struct dpages dp;
	bvec_dp_init(&dp, bvec);
	return async_io(num_regions, where, rw, &dp, fn, context);
}

int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw,
		   void *data, io_notify_fn fn, void *context)
{
	struct dpages dp;
	vm_dp_init(&dp, data);
	return async_io(num_regions, where, rw, &dp, fn, context);
}

EXPORT_SYMBOL(dm_io_get);
EXPORT_SYMBOL(dm_io_put);
EXPORT_SYMBOL(dm_io_sync);
EXPORT_SYMBOL(dm_io_async);
EXPORT_SYMBOL(dm_io_sync_bvec);
EXPORT_SYMBOL(dm_io_async_bvec);
EXPORT_SYMBOL(dm_io_sync_vm);
EXPORT_SYMBOL(dm_io_async_vm);