// SPDX-License-Identifier: GPL-2.0
/*
 * bio-integrity.c - bio data integrity extensions
 *
 * Copyright (C) 2007, 2008, 2009 Oracle Corporation
 * Written by: Martin K. Petersen <martin.petersen@oracle.com>
 */

#include <linux/blk-integrity.h>
#include <linux/mempool.h>
#include <linux/export.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include "blk.h"

static struct kmem_cache *bip_slab;
static struct workqueue_struct *kintegrityd_wq;

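/*
 * Flush the kintegrityd workqueue, waiting for any deferred integrity
 * verification work to complete.
 */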
void blk_flush_integrity(void)
{
	flush_workqueue(kintegrityd_wq);
}

/**
 * bio_integrity_free - Free bio integrity payload
 * @bio:	bio containing bip to be freed
 *
 * Description: Free the integrity portion of a bio.
 */
void bio_integrity_free(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_set *bs = bio->bi_pool;

	if (bs && mempool_initialized(&bs->bio_integrity_pool)) {
		if (bip->bip_vec)
			bvec_free(&bs->bvec_integrity_pool, bip->bip_vec,
				  bip->bip_max_vcnt);
		mempool_free(bip, &bs->bio_integrity_pool);
	} else {
		kfree(bip);
	}
	bio->bi_integrity = NULL;
	bio->bi_opf &= ~REQ_INTEGRITY;
}

/**
 * bio_integrity_alloc - Allocate integrity payload and attach it to bio
 * @bio:	bio to attach integrity metadata to
 * @gfp_mask:	Memory allocation mask
 * @nr_vecs:	Number of integrity metadata scatter-gather elements
 *
 * Description: This function prepares a bio for attaching integrity
 * metadata.  nr_vecs specifies the maximum number of pages containing
 * integrity metadata that can be attached.
 */
struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
						  gfp_t gfp_mask,
						  unsigned int nr_vecs)
{
	struct bio_integrity_payload *bip;
	struct bio_set *bs = bio->bi_pool;
	unsigned inline_vecs;

	if (WARN_ON_ONCE(bio_has_crypt_ctx(bio)))
		return ERR_PTR(-EOPNOTSUPP);

	if (!bs || !mempool_initialized(&bs->bio_integrity_pool)) {
		bip = kmalloc(struct_size(bip, bip_inline_vecs, nr_vecs), gfp_mask);
		inline_vecs = nr_vecs;
	} else {
		bip = mempool_alloc(&bs->bio_integrity_pool, gfp_mask);
		inline_vecs = BIO_INLINE_VECS;
	}

	if (unlikely(!bip))
		return ERR_PTR(-ENOMEM);

	memset(bip, 0, sizeof(*bip));

	/* always report as many vecs as asked explicitly, not inline vecs */
	bip->bip_max_vcnt = nr_vecs;
	if (nr_vecs > inline_vecs) {
		bip->bip_vec = bvec_alloc(&bs->bvec_integrity_pool,
					  &bip->bip_max_vcnt, gfp_mask);
		if (!bip->bip_vec)
			goto err;
	} else if (nr_vecs) {
		bip->bip_vec = bip->bip_inline_vecs;
	}

	bip->bip_bio = bio;
	bio->bi_integrity = bip;
	bio->bi_opf |= REQ_INTEGRITY;

	return bip;
err:
	if (bs && mempool_initialized(&bs->bio_integrity_pool))
		mempool_free(bip, &bs->bio_integrity_pool);
	else
		kfree(bip);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(bio_integrity_alloc);

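/*
 * Unpin the user pages referenced by a bvec array, optionally marking
 * them dirty first (done for reads, where the device wrote into user
 * memory).
 */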
static void bio_integrity_unpin_bvec(struct bio_vec *bv, int nr_vecs,
				     bool dirty)
{
	int i;

	for (i = 0; i < nr_vecs; i++) {
		if (dirty && !PageCompound(bv[i].bv_page))
			set_page_dirty_lock(bv[i].bv_page);
		unpin_user_page(bv[i].bv_page);
	}
}

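/*
 * Copy integrity data from the kernel bounce buffer (bip_vec[0]) back to
 * the original user bvecs stashed at bip_vec[1..], then unpin those pages.
 * Used on read completion when BIP_COPY_USER is set.
 */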
static void bio_integrity_uncopy_user(struct bio_integrity_payload *bip)
{
	unsigned short nr_vecs = bip->bip_max_vcnt - 1;
	struct bio_vec *copy = &bip->bip_vec[1];
	size_t bytes = bip->bip_iter.bi_size;
	struct iov_iter iter;
	int ret;

	iov_iter_bvec(&iter, ITER_DEST, copy, nr_vecs, bytes);
	ret = copy_to_iter(bvec_virt(bip->bip_vec), bytes, &iter);
	WARN_ON_ONCE(ret != bytes);

	bio_integrity_unpin_bvec(copy, nr_vecs, true);
}

/**
 * bio_integrity_unmap_user - Unmap user integrity payload
 * @bio:	bio containing bip to be unmapped
 *
 * Unmap the user mapped integrity portion of a bio.
 */
void bio_integrity_unmap_user(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip->bip_flags & BIP_COPY_USER) {
		if (bio_data_dir(bio) == READ)
			bio_integrity_uncopy_user(bip);
		kfree(bvec_virt(bip->bip_vec));
		return;
	}

	bio_integrity_unpin_bvec(bip->bip_vec, bip->bip_max_vcnt,
			bio_data_dir(bio) == READ);
}

/**
 * bio_integrity_add_page - Attach integrity metadata
 * @bio:	bio to update
 * @page:	page containing integrity metadata
 * @len:	number of bytes of integrity metadata in page
 * @offset:	start offset within page
 *
 * Description: Attach a page containing integrity metadata to bio.
 */
int bio_integrity_add_page(struct bio *bio, struct page *page,
			   unsigned int len, unsigned int offset)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (((bip->bip_iter.bi_size + len) >> SECTOR_SHIFT) >
	    queue_max_hw_sectors(q))
		return 0;

	if (bip->bip_vcnt > 0) {
		struct bio_vec *bv = &bip->bip_vec[bip->bip_vcnt - 1];
		bool same_page = false;

		if (bvec_try_merge_hw_page(q, bv, page, len, offset,
					   &same_page)) {
			bip->bip_iter.bi_size += len;
			return len;
		}

		if (bip->bip_vcnt >=
		    min(bip->bip_max_vcnt, queue_max_integrity_segments(q)))
			return 0;

		/*
		 * If the queue doesn't support SG gaps and adding this segment
		 * would create a gap, disallow it.
		 */
		if (bvec_gap_to_prev(&q->limits, bv, offset))
			return 0;
	}

	bvec_set_page(&bip->bip_vec[bip->bip_vcnt], page, len, offset);
	bip->bip_vcnt++;
	bip->bip_iter.bi_size += len;

	return len;
}
EXPORT_SYMBOL(bio_integrity_add_page);

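/*
 * Bounce the user integrity buffer through a kernel allocation.  For a
 * write the user data is copied into the bounce buffer up front and the
 * pinned user pages are released; for a read the original bvecs are kept
 * after bip_vec[0] so the result can be copied back on completion.
 */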
static int bio_integrity_copy_user(struct bio *bio, struct bio_vec *bvec,
				   int nr_vecs, unsigned int len,
				   unsigned int direction, u32 seed)
{
	bool write = direction == ITER_SOURCE;
	struct bio_integrity_payload *bip;
	struct iov_iter iter;
	void *buf;
	int ret;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (write) {
		iov_iter_bvec(&iter, direction, bvec, nr_vecs, len);
		if (!copy_from_iter_full(buf, len, &iter)) {
			ret = -EFAULT;
			goto free_buf;
		}

		bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
	} else {
		memset(buf, 0, len);

		/*
		 * We need to preserve the original bvec and the number of vecs
		 * in it for completion handling
		 */
		bip = bio_integrity_alloc(bio, GFP_KERNEL, nr_vecs + 1);
	}

	if (IS_ERR(bip)) {
		ret = PTR_ERR(bip);
		goto free_buf;
	}

	if (write)
		bio_integrity_unpin_bvec(bvec, nr_vecs, false);
	else
		memcpy(&bip->bip_vec[1], bvec, nr_vecs * sizeof(*bvec));

	ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
				     offset_in_page(buf));
	if (ret != len) {
		ret = -ENOMEM;
		goto free_bip;
	}

	bip->bip_flags |= BIP_COPY_USER;
	bip->bip_iter.bi_sector = seed;
	bip->bip_vcnt = nr_vecs;
	return 0;
free_bip:
	bio_integrity_free(bio);
free_buf:
	kfree(buf);
	return ret;
}

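/*
 * Attach the pinned user pages directly as the integrity payload, with no
 * bounce buffer.
 */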
static int bio_integrity_init_user(struct bio *bio, struct bio_vec *bvec,
				   int nr_vecs, unsigned int len, u32 seed)
{
	struct bio_integrity_payload *bip;

	bip = bio_integrity_alloc(bio, GFP_KERNEL, nr_vecs);
	if (IS_ERR(bip))
		return PTR_ERR(bip);

	memcpy(bip->bip_vec, bvec, nr_vecs * sizeof(*bvec));
	bip->bip_iter.bi_sector = seed;
	bip->bip_iter.bi_size = len;
	bip->bip_vcnt = nr_vecs;
	return 0;
}

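/*
 * Fill a bvec array from an array of pinned pages, merging pages that are
 * physically contiguous within the same folio into a single bvec and
 * dropping the extra page pins.  Returns the number of bvecs used.
 */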
static unsigned int bvec_from_pages(struct bio_vec *bvec, struct page **pages,
				    int nr_vecs, ssize_t bytes, ssize_t offset)
{
	unsigned int nr_bvecs = 0;
	int i, j;

	for (i = 0; i < nr_vecs; i = j) {
		size_t size = min_t(size_t, bytes, PAGE_SIZE - offset);
		struct folio *folio = page_folio(pages[i]);

		bytes -= size;
		for (j = i + 1; j < nr_vecs; j++) {
			size_t next = min_t(size_t, PAGE_SIZE, bytes);

			if (page_folio(pages[j]) != folio ||
			    pages[j] != pages[j - 1] + 1)
				break;
			unpin_user_page(pages[j]);
			size += next;
			bytes -= next;
		}

		bvec_set_page(&bvec[nr_bvecs], pages[i], size, offset);
		offset = 0;
		nr_bvecs++;
	}

	return nr_bvecs;
}

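/**
 * bio_integrity_map_user - Map user-space integrity metadata into a bio
 * @bio:	bio to attach the metadata to
 * @ubuf:	user buffer holding the integrity metadata
 * @bytes:	number of bytes of integrity metadata
 * @seed:	value stored as the payload's starting sector
 *
 * Pin the user pages backing @ubuf and attach them as the bio's integrity
 * payload.  If the buffer does not satisfy the queue's DMA alignment or
 * needs more segments than the queue supports, bounce the data through a
 * kernel copy instead.
 */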
int bio_integrity_map_user(struct bio *bio, void __user *ubuf, ssize_t bytes,
			   u32 seed)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	unsigned int align = blk_lim_dma_alignment_and_pad(&q->limits);
	struct page *stack_pages[UIO_FASTIOV], **pages = stack_pages;
	struct bio_vec stack_vec[UIO_FASTIOV], *bvec = stack_vec;
	unsigned int direction, nr_bvecs;
	struct iov_iter iter;
	int ret, nr_vecs;
	size_t offset;
	bool copy;

	if (bio_integrity(bio))
		return -EINVAL;
	if (bytes >> SECTOR_SHIFT > queue_max_hw_sectors(q))
		return -E2BIG;

	if (bio_data_dir(bio) == READ)
		direction = ITER_DEST;
	else
		direction = ITER_SOURCE;

	iov_iter_ubuf(&iter, direction, ubuf, bytes);
	nr_vecs = iov_iter_npages(&iter, BIO_MAX_VECS + 1);
	if (nr_vecs > BIO_MAX_VECS)
		return -E2BIG;
	if (nr_vecs > UIO_FASTIOV) {
		bvec = kcalloc(nr_vecs, sizeof(*bvec), GFP_KERNEL);
		if (!bvec)
			return -ENOMEM;
		pages = NULL;
	}

	copy = !iov_iter_is_aligned(&iter, align, align);
	ret = iov_iter_extract_pages(&iter, &pages, bytes, nr_vecs, 0, &offset);
	if (unlikely(ret < 0))
		goto free_bvec;

	nr_bvecs = bvec_from_pages(bvec, pages, nr_vecs, bytes, offset);
	if (pages != stack_pages)
		kvfree(pages);
	if (nr_bvecs > queue_max_integrity_segments(q))
		copy = true;

	if (copy)
		ret = bio_integrity_copy_user(bio, bvec, nr_bvecs, bytes,
					      direction, seed);
	else
		ret = bio_integrity_init_user(bio, bvec, nr_bvecs, bytes, seed);
	if (ret)
		goto release_pages;
	if (bvec != stack_vec)
		kfree(bvec);

	return 0;

release_pages:
	bio_integrity_unpin_bvec(bvec, nr_bvecs, false);
free_bvec:
	if (bvec != stack_vec)
		kfree(bvec);
	return ret;
}
EXPORT_SYMBOL_GPL(bio_integrity_map_user);

/**
 * bio_integrity_prep - Prepare bio for integrity I/O
 * @bio:	bio to prepare
 *
 * Description:  Checks if the bio already has an integrity payload attached.
 * If it does, the payload has been generated by another kernel subsystem,
 * and we just pass it through.  Otherwise it allocates an integrity payload.
 * The bio must have its data direction, target device and start sector set
 * prior to calling.  In the WRITE case, integrity metadata will be generated
 * using the block device's integrity function.  In the READ case, the buffer
 * will be prepared for DMA and a suitable end_io handler set up.
 */
bool bio_integrity_prep(struct bio *bio)
{
	struct bio_integrity_payload *bip;
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
	unsigned int len;
	void *buf;
	gfp_t gfp = GFP_NOIO;

	if (!bi)
		return true;

	if (!bio_sectors(bio))
		return true;

	/* Already protected? */
	if (bio_integrity(bio))
		return true;

	switch (bio_op(bio)) {
	case REQ_OP_READ:
		if (bi->flags & BLK_INTEGRITY_NOVERIFY)
			return true;
		break;
	case REQ_OP_WRITE:
		if (bi->flags & BLK_INTEGRITY_NOGENERATE)
			return true;

		/*
		 * Zero the memory allocated to not leak uninitialized kernel
		 * memory to disk for non-integrity metadata where nothing else
		 * initializes the memory.
		 */
		if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE)
			gfp |= __GFP_ZERO;
		break;
	default:
		return true;
	}

	/* Allocate kernel buffer for protection data */
	len = bio_integrity_bytes(bi, bio_sectors(bio));
	buf = kmalloc(len, gfp);
	if (unlikely(buf == NULL))
		goto err_end_io;

	bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
	if (IS_ERR(bip)) {
		kfree(buf);
		goto err_end_io;
	}

	bip->bip_flags |= BIP_BLOCK_INTEGRITY;
	bip_set_seed(bip, bio->bi_iter.bi_sector);

	if (bi->csum_type == BLK_INTEGRITY_CSUM_IP)
		bip->bip_flags |= BIP_IP_CHECKSUM;

	if (bio_integrity_add_page(bio, virt_to_page(buf), len,
			offset_in_page(buf)) < len) {
		printk(KERN_ERR "could not attach integrity payload\n");
		goto err_end_io;
	}

	/* Auto-generate integrity metadata if this is a write */
	if (bio_data_dir(bio) == WRITE)
		blk_integrity_generate(bio);
	else
		bip->bio_iter = bio->bi_iter;
	return true;

err_end_io:
	bio->bi_status = BLK_STS_RESOURCE;
	bio_endio(bio);
	return false;
}
EXPORT_SYMBOL(bio_integrity_prep);

/**
 * bio_integrity_verify_fn - Integrity I/O completion worker
 * @work:	Work struct stored in bio to be verified
 *
 * Description: This workqueue function is called to complete a READ
 * request.  The function verifies the transferred integrity metadata
 * and then calls the original bio end_io function.
 */
static void bio_integrity_verify_fn(struct work_struct *work)
{
	struct bio_integrity_payload *bip =
		container_of(work, struct bio_integrity_payload, bip_work);
	struct bio *bio = bip->bip_bio;

	blk_integrity_verify(bio);

	kfree(bvec_virt(bip->bip_vec));
	bio_integrity_free(bio);
	bio_endio(bio);
}

/**
 * __bio_integrity_endio - Integrity I/O completion function
 * @bio:	Protected bio
 *
 * Description: Completion for integrity I/O
 *
 * Normally I/O completion is done in interrupt context.  However,
 * verifying I/O integrity is a time-consuming task which must be run
 * in process context.  This function postpones completion
 * accordingly.
 */
bool __bio_integrity_endio(struct bio *bio)
{
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bio_op(bio) == REQ_OP_READ && !bio->bi_status && bi->csum_type) {
		INIT_WORK(&bip->bip_work, bio_integrity_verify_fn);
		queue_work(kintegrityd_wq, &bip->bip_work);
		return false;
	}

	kfree(bvec_virt(bip->bip_vec));
	bio_integrity_free(bio);
	return true;
}

/**
 * bio_integrity_advance - Advance integrity vector
 * @bio:	bio whose integrity vector to update
 * @bytes_done:	number of data bytes that have been completed
 *
 * Description: This function calculates how many integrity bytes the
 * number of completed data bytes correspond to and advances the
 * integrity vector accordingly.
 */
void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
	unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);

	bip->bip_iter.bi_sector += bio_integrity_intervals(bi, bytes_done >> 9);
	bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes);
}

/**
 * bio_integrity_trim - Trim integrity vector
 * @bio:	bio whose integrity vector to update
 *
 * Description: Used to trim the integrity vector in a cloned bio.
 */
void bio_integrity_trim(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);

	bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
}
EXPORT_SYMBOL(bio_integrity_trim);

/**
 * bio_integrity_clone - Callback for cloning bios with integrity metadata
 * @bio:	New bio
 * @bio_src:	Original bio
 * @gfp_mask:	Memory allocation mask
 *
 * Description:	Called to allocate a bip when cloning a bio
 */
int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
			gfp_t gfp_mask)
{
	struct bio_integrity_payload *bip_src = bio_integrity(bio_src);
	struct bio_integrity_payload *bip;

	BUG_ON(bip_src == NULL);

	bip = bio_integrity_alloc(bio, gfp_mask, 0);
	if (IS_ERR(bip))
		return PTR_ERR(bip);

	bip->bip_vec = bip_src->bip_vec;
	bip->bip_iter = bip_src->bip_iter;
	bip->bip_flags = bip_src->bip_flags & ~BIP_BLOCK_INTEGRITY;

	return 0;
}

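/*
 * Set up the mempools used to allocate integrity payloads and integrity
 * bvecs for bios allocated from this bio_set.
 */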
int bioset_integrity_create(struct bio_set *bs, int pool_size)
{
	if (mempool_initialized(&bs->bio_integrity_pool))
		return 0;

	if (mempool_init_slab_pool(&bs->bio_integrity_pool,
				   pool_size, bip_slab))
		return -1;

	if (biovec_init_pool(&bs->bvec_integrity_pool, pool_size)) {
		mempool_exit(&bs->bio_integrity_pool);
		return -1;
	}

	return 0;
}
EXPORT_SYMBOL(bioset_integrity_create);

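/*
 * Tear down the integrity mempools of a bio_set.
 */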
void bioset_integrity_free(struct bio_set *bs)
{
	mempool_exit(&bs->bio_integrity_pool);
	mempool_exit(&bs->bvec_integrity_pool);
}

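/*
 * Boot-time setup: create the kintegrityd workqueue used for deferred
 * verification and the slab cache backing integrity payload allocations.
 */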
void __init bio_integrity_init(void)
{
	/*
	 * kintegrityd won't block much but may burn a lot of CPU cycles.
	 * Make it highpri CPU intensive wq with max concurrency of 1.
	 */
	kintegrityd_wq = alloc_workqueue("kintegrityd", WQ_MEM_RECLAIM |
					 WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1);
	if (!kintegrityd_wq)
		panic("Failed to create kintegrityd\n");

	bip_slab = kmem_cache_create("bio_integrity_payload",
				     sizeof(struct bio_integrity_payload) +
				     sizeof(struct bio_vec) * BIO_INLINE_VECS,
				     0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
}