// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG

struct dm_io_client {
	mempool_t pool;
	struct bio_set bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address. Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
} __aligned(DM_IO_MAX_REGIONS);

static struct kmem_cache *_dm_io_cache;

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
	struct dm_io_client *client;
	unsigned int min_ios = dm_get_reserved_bio_based_ios();
	int ret;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	ret = mempool_init_slab_pool(&client->pool, min_ios, _dm_io_cache);
	if (ret)
		goto bad;

	ret = bioset_init(&client->bios, min_ios, 0, BIOSET_NEED_BVECS);
	if (ret)
		goto bad;

	return client;

bad:
	mempool_exit(&client->pool);
	kfree(client);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(dm_io_client_create);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_exit(&client->pool);
	bioset_exit(&client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);

/*
 *-------------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *-------------------------------------------------------------------
 */
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned int region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
					    unsigned int *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

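	/*
	 * -DM_IO_MAX_REGIONS is ~(DM_IO_MAX_REGIONS - 1): it clears the
	 * low bits carrying the region number and leaves the aligned
	 * 'struct io' pointer.
	 */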
	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}

/*
 *--------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *--------------------------------------------------------------
 */
static void complete_io(struct io *io)
{
	unsigned long error_bits = io->error_bits;
	io_notify_fn fn = io->callback;
	void *context = io->context;

	if (io->vma_invalidate_size)
		invalidate_kernel_vmap_range(io->vma_invalidate_address,
					     io->vma_invalidate_size);

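	/*
	 * Return the io to the pool before invoking the callback: the
	 * callback may wake a waiter that goes on to tear down the client
	 * (and with it the pool) straight away.
	 */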
	mempool_free(io, &io->client->pool);
	fn(error_bits, context);
}

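/*
 * Drop one reference to the io, recording any error against the given
 * region.  The io completes once the last reference (one per submitted
 * bio plus the initial one taken in async_io()) has been dropped.
 */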
static void dec_count(struct io *io, unsigned int region, blk_status_t error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count))
		complete_io(io);
}

static void endio(struct bio *bio)
{
	struct io *io;
	unsigned int region;
	blk_status_t error;

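	/*
	 * Zero the buffer on a failed read so callers never see stale or
	 * uninitialised data.
	 */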
	if (bio->bi_status && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	error = bio->bi_status;
	bio_put(bio);

	dec_count(io, region, error);
}

/*
 *--------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *--------------------------------------------------------------
 */
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned int *offset);
	void (*next_page)(struct dpages *dp);

	union {
		unsigned int context_u;
		struct bvec_iter context_bi;
	};
	void *context_ptr;

	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
};

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
			  struct page **p, unsigned long *len, unsigned int *offset)
{
	unsigned int o = dp->context_u;
	struct page_list *pl = dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = dp->context_ptr;

	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned int offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bio_get_page(struct dpages *dp, struct page **p,
			 unsigned long *len, unsigned int *offset)
{
	struct bio_vec bvec = bvec_iter_bvec((struct bio_vec *)dp->context_ptr,
					     dp->context_bi);

	*p = bvec.bv_page;
	*len = bvec.bv_len;
	*offset = bvec.bv_offset;

	/* avoid figuring it out again in bio_next_page() */
	dp->context_bi.bi_sector = (sector_t)bvec.bv_len;
}

static void bio_next_page(struct dpages *dp)
{
	unsigned int len = (unsigned int)dp->context_bi.bi_sector;

	bvec_iter_advance((struct bio_vec *)dp->context_ptr,
			  &dp->context_bi, len);
}

static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
	dp->get_page = bio_get_page;
	dp->next_page = bio_next_page;

	/*
	 * We just use bvec iterator to retrieve pages, so it is ok to
	 * access the bvec table directly here
	 */
	dp->context_ptr = bio->bi_io_vec;
	dp->context_bi = bio->bi_iter;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
			struct page **p, unsigned long *len, unsigned int *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = offset_in_page(data);
	dp->context_ptr = data;
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned int *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = offset_in_page(data);
	dp->context_ptr = data;
}

/*
 *---------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------
 */
static void do_region(const blk_opf_t opf, unsigned int region,
		      struct dm_io_region *where, struct dpages *dp,
		      struct io *io, unsigned short ioprio)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned int offset;
	unsigned int num_bvecs;
	sector_t remaining = where->count;
	struct request_queue *q = bdev_get_queue(where->bdev);
	sector_t num_sectors;
	unsigned int special_cmd_max_sectors;
	const enum req_op op = opf & REQ_OP_MASK;

	/*
	 * Reject unsupported discard and write zeroes requests.
	 */
	if (op == REQ_OP_DISCARD)
		special_cmd_max_sectors = bdev_max_discard_sectors(where->bdev);
	else if (op == REQ_OP_WRITE_ZEROES)
		special_cmd_max_sectors = q->limits.max_write_zeroes_sectors;
	if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) &&
	    special_cmd_max_sectors == 0) {
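		/*
		 * No bio will be issued for this region: take an extra
		 * reference and drop it again via dec_count() so the
		 * failure is recorded without completing the io early.
		 */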
		atomic_inc(&io->count);
		dec_count(io, region, BLK_STS_NOTSUPP);
		return;
	}

	/*
	 * where->count may be zero if op holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably sized bio.
		 */
		switch (op) {
		case REQ_OP_DISCARD:
		case REQ_OP_WRITE_ZEROES:
			num_bvecs = 0;
			break;
		default:
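			/*
			 * One bvec per page of remaining data, plus one in
			 * case the data starts part-way into a page.
			 */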
			num_bvecs = bio_max_segs(dm_sector_div_up(remaining,
						(PAGE_SIZE >> SECTOR_SHIFT)) + 1);
		}

		bio = bio_alloc_bioset(where->bdev, num_bvecs, opf, GFP_NOIO,
				       &io->client->bios);
		bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
		bio->bi_end_io = endio;
		bio->bi_ioprio = ioprio;
		store_io_and_region_in_bio(bio, io, region);

		if (op == REQ_OP_DISCARD || op == REQ_OP_WRITE_ZEROES) {
			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
			remaining -= num_sectors;
		} else {
			while (remaining) {
				/*
				 * Try and add as many pages as possible.
				 */
				dp->get_page(dp, &page, &len, &offset);
				len = min(len, to_bytes(remaining));
				if (!bio_add_page(bio, page, len, offset))
					break;

				offset = 0;
				remaining -= to_sector(len);
				dp->next_page(dp);
			}
		}

		atomic_inc(&io->count);
		submit_bio(bio);
	} while (remaining);
}

static void dispatch_io(blk_opf_t opf, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, unsigned short ioprio)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (opf & REQ_PREFLUSH))
			do_region(opf, i, where + i, dp, io, ioprio);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

static void async_io(struct dm_io_client *client, unsigned int num_regions,
		     struct dm_io_region *where, blk_opf_t opf,
		     struct dpages *dp, io_notify_fn fn, void *context,
		     unsigned short ioprio)
{
	struct io *io;

	io = mempool_alloc(&client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = fn;
	io->context = context;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(opf, num_regions, where, dp, io, ioprio);
}

struct sync_io {
	unsigned long error_bits;
	struct completion wait;
};

static void sync_io_complete(unsigned long error, void *context)
{
	struct sync_io *sio = context;

	sio->error_bits = error;
	complete(&sio->wait);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, blk_opf_t opf, struct dpages *dp,
		   unsigned long *error_bits, unsigned short ioprio)
{
	struct sync_io sio;

	init_completion(&sio.wait);

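	/* The caller is waiting, so let the block layer treat the io as synchronous. */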
	async_io(client, num_regions, where, opf | REQ_SYNC, dp,
		 sync_io_complete, &sio, ioprio);

	wait_for_completion_io(&sio.wait);

	if (error_bits)
		*error_bits = sio.error_bits;

	return sio.error_bits ? -EIO : 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
		   unsigned long size)
{
	/* Set up dpages based on memory type */

	dp->vma_invalidate_address = NULL;
	dp->vma_invalidate_size = 0;

	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BIO:
		bio_dp_init(dp, io_req->mem.ptr.bio);
		break;

	case DM_IO_VMA:
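		/*
		 * Flush the vmalloc alias before starting the io; for reads
		 * the alias must also be invalidated after completion, which
		 * complete_io() takes care of.
		 */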
		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
		if ((io_req->bi_opf & REQ_OP_MASK) == REQ_OP_READ) {
			dp->vma_invalidate_address = io_req->mem.ptr.vma;
			dp->vma_invalidate_size = size;
		}
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

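/*
 * dm_io - issue io to one or more regions of a device.
 *
 * If io_req->notify.fn is NULL the call is synchronous: it waits for all
 * regions to complete and, if sync_error_bits is non-NULL, returns the
 * per-region error bits through it.  Otherwise the io is dispatched
 * asynchronously and notify.fn is called with the error bits once every
 * region has finished.
 *
 * Reads are limited to a single region because all regions share the same
 * destination pages.
 */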
int dm_io(struct dm_io_request *io_req, unsigned int num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits,
	  unsigned short ioprio)
{
	int r;
	struct dpages dp;

	if (num_regions > 1 && !op_is_write(io_req->bi_opf)) {
		WARN_ON(1);
		return -EIO;
	}

	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_opf, &dp, sync_error_bits, ioprio);

	async_io(io_req->client, num_regions, where, io_req->bi_opf, &dp,
		 io_req->notify.fn, io_req->notify.context, ioprio);
	return 0;
}
EXPORT_SYMBOL(dm_io);

int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}