xref: /linux/drivers/md/dm-io.c (revision c4c11dd160a8cc98f402c4e12f94b1572e822ffd)
/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG
#define MIN_IOS		16
#define MIN_BIOS	16

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address.  Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct task_struct *sleeper;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));

static struct kmem_cache *_dm_io_cache;

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
	struct dm_io_client *client;

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_slab_pool(MIN_IOS, _dm_io_cache);
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(MIN_BIOS, 0);
	if (!client->bios)
		goto bad;

	return client;

   bad:
	if (client->pool)
		mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);
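
/*
 * Illustrative usage only (not part of this driver): a target would
 * typically create one client in its constructor and destroy it in
 * its destructor, e.g.
 *
 *	struct dm_io_client *io_client;
 *
 *	io_client = dm_io_client_create();
 *	if (IS_ERR(io_client))
 *		return PTR_ERR(io_client);
 *	...
 *	dm_io_client_destroy(io_client);
 *
 * The client carries the mempool and bioset used for all io issued
 * through it, so allocations made while servicing dm_io() can make
 * forward progress under memory pressure.
 */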

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
				       unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}
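
/*
 * Worked example (illustrative only): on a 64-bit build
 * DM_IO_MAX_REGIONS is 64, so 'struct io' is 64-byte aligned and the
 * low 6 bits of its address are always zero.  Packing an io at
 * 0x...9c0 with region 5 gives
 *
 *	bi_private = 0x...9c0 | 0x5 = 0x...9c5
 *
 * and unpacking uses the mask -64 == ~63:
 *
 *	io     = 0x...9c5 & ~63UL = 0x...9c0
 *	region = 0x...9c5 &  63   = 5
 */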

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count)) {
		if (io->vma_invalidate_size)
			invalidate_kernel_vmap_range(io->vma_invalidate_address,
						     io->vma_invalidate_size);

		if (io->sleeper)
			wake_up_process(io->sleeper);
		else {
			unsigned long r = io->error_bits;
			io_notify_fn fn = io->callback;
			void *context = io->context;

			mempool_free(io, io->client->pool);
			fn(r, context);
		}
	}
}
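
/*
 * Reference counting sketch (illustrative only): the submitter sets
 * io->count to 1 before calling dispatch_io(), each bio submitted by
 * do_region() adds one, and each endio() plus the final dec_count()
 * in dispatch_io() drops one.  For a 2-region request needing 3 bios
 * in total:
 *
 *	count = 1		(sync_io/async_io)
 *	count = 1 + 3 = 4	(do_region submits 3 bios)
 *	count = 4 - 1 = 3	(dispatch_io drops its extra reference)
 *	count = 3 - 3 = 0	(the three endio() calls)
 *
 * Only the transition to zero wakes the sleeper or runs the callback,
 * so the io cannot complete while bios are still being issued.
 */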

static void endio(struct bio *bio, int error)
{
	struct io *io;
	unsigned region;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;

	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
};
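
/*
 * Usage sketch (illustrative only): do_region() drives a dpages
 * object as a simple cursor.  Each step of its page-filling loop asks
 * for the current page, consumes some or all of it, then advances:
 *
 *	dp->get_page(dp, &page, &len, &offset);
 *	...add up to 'len' bytes at 'offset' to a bio...
 *	dp->next_page(dp);
 *
 * The four *_dp_init() helpers below only differ in how they map
 * context_ptr/context_u onto a page list, a bvec array, a vmalloc
 * region or a kmalloc'd buffer.
 */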

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len;
	*offset = bvec->bv_offset;
}

static void bvec_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	dp->context_ptr = bvec + 1;
}

static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
	dp->get_page = bvec_get_page;
	dp->next_page = bvec_next_page;
	dp->context_ptr = bvec;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
		 struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}
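
/*
 * Offset arithmetic example for the vm/km variants (illustrative
 * only): with PAGE_SIZE == 4096 and a buffer starting at 0x...1300,
 *
 *	context_u = 0x...1300 & 0xfff = 0x300
 *
 * so the first get_page() returns the page containing the buffer with
 * offset 0x300 and len 4096 - 0x300 = 0xd00 bytes, and next_page()
 * advances context_ptr by exactly those 0xd00 bytes to the following
 * page boundary, after which context_u stays 0 and whole pages are
 * returned.
 */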

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;
	struct request_queue *q = bdev_get_queue(where->bdev);
	unsigned short logical_block_size = queue_logical_block_size(q);
	sector_t num_sectors;

	/*
	 * where->count may be zero if rw holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably sized bio.
		 */
		if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME))
			num_bvecs = 1;
		else
			num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev),
					  dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));

		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		store_io_and_region_in_bio(bio, io, region);

		if (rw & REQ_DISCARD) {
			num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
			bio->bi_size = num_sectors << SECTOR_SHIFT;
			remaining -= num_sectors;
		} else if (rw & REQ_WRITE_SAME) {
			/*
			 * WRITE SAME only uses a single page.
			 */
			dp->get_page(dp, &page, &len, &offset);
			bio_add_page(bio, page, logical_block_size, offset);
			num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
			bio->bi_size = num_sectors << SECTOR_SHIFT;

			offset = 0;
			remaining -= num_sectors;
			dp->next_page(dp);
		} else while (remaining) {
			/*
			 * Try and add as many pages as possible.
			 */
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	} while (remaining);
}
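
/*
 * Splitting sketch (illustrative only): a plain write of a 2 MiB
 * region with 4 KiB pages covers 4096 sectors.  If bio_get_nr_vecs()
 * reports, say, 256 vectors per bio, each pass of the outer loop
 * packs up to 256 pages (2048 sectors) into one bio, so the region
 * goes out as two bios; 'remaining' reaches zero and the loop stops.
 * Discards carry no data pages and WRITE SAME carries a single page;
 * both consume up to the queue's max_discard_sectors or
 * max_write_same_sectors per bio.
 */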

static void dispatch_io(int rw, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		rw |= REQ_SYNC;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (rw & REQ_FLUSH))
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	/*
	 * gcc <= 4.3 can't do the alignment for stack variables, so we must
	 * align it on our own.
	 * volatile prevents the optimizer from removing or reusing the "io_"
	 * stack variable, which ANSI C would otherwise allow.
	 */
	volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
	struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));
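
	/*
	 * Alignment sketch (illustrative only): PTR_ALIGN(p, a) rounds p
	 * up to the next multiple of a.  If io_ starts at 0x...f28 and
	 * __alignof__(struct io) is 64, then
	 *
	 *	io = (0x...f28 + 63) & ~63UL = 0x...f40
	 *
	 * which still lies inside io_[] because the array is padded by
	 * __alignof__(struct io) - 1 bytes.
	 */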

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = current;
	io->client = client;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 1);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&io->count))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	if (error_bits)
		*error_bits = io->error_bits;

	return io->error_bits ? -EIO : 0;
}

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = NULL;
	io->client = client;
	io->callback = fn;
	io->context = context;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
		   unsigned long size)
{
	/* Set up dpages based on memory type */

	dp->vma_invalidate_address = NULL;
	dp->vma_invalidate_size = 0;

	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BVEC:
		bvec_dp_init(dp, io_req->mem.ptr.bvec);
		break;

	case DM_IO_VMA:
		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
		if ((io_req->bi_rw & RW_MASK) == READ) {
			dp->vma_invalidate_address = io_req->mem.ptr.vma;
			dp->vma_invalidate_size = size;
		}
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set REQ_SYNC in
 * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
 * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
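
/*
 * Calling sketch (illustrative only; bdev, buffer and io_client are
 * assumed to have been set up elsewhere): a synchronous read of one
 * region into a kmalloc'd buffer could look like
 *
 *	struct dm_io_region region = {
 *		.bdev	= bdev,
 *		.sector	= 0,
 *		.count	= 8,			(8 sectors = 4 KiB)
 *	};
 *	struct dm_io_request req = {
 *		.bi_rw		= READ,
 *		.mem.type	= DM_IO_KMEM,
 *		.mem.ptr.addr	= buffer,
 *		.notify.fn	= NULL,		(NULL means synchronous)
 *		.client		= io_client,
 *	};
 *	unsigned long error_bits;
 *
 *	r = dm_io(&req, 1, &region, &error_bits);
 *
 * Supplying a notify.fn instead makes the call asynchronous and the
 * error bits are passed to that callback.
 */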

int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}