xref: /linux/block/bio-integrity.c (revision e814f3fd16acfb7f9966773953de8f740a1e3202)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * bio-integrity.c - bio data integrity extensions
4  *
5  * Copyright (C) 2007, 2008, 2009 Oracle Corporation
6  * Written by: Martin K. Petersen <martin.petersen@oracle.com>
7  */
8 
9 #include <linux/blk-integrity.h>
10 #include <linux/mempool.h>
11 #include <linux/export.h>
12 #include <linux/bio.h>
13 #include <linux/workqueue.h>
14 #include <linux/slab.h>
15 #include "blk.h"
16 
17 static struct kmem_cache *bip_slab;
18 static struct workqueue_struct *kintegrityd_wq;
19 
/* Wait for all queued integrity verification work items to finish. */
void blk_flush_integrity(void)
{
	flush_workqueue(kintegrityd_wq);
}
24 
25 /**
26  * bio_integrity_free - Free bio integrity payload
27  * @bio:	bio containing bip to be freed
28  *
29  * Description: Free the integrity portion of a bio.
30  */
31 void bio_integrity_free(struct bio *bio)
32 {
33 	struct bio_integrity_payload *bip = bio_integrity(bio);
34 	struct bio_set *bs = bio->bi_pool;
35 
36 	if (bs && mempool_initialized(&bs->bio_integrity_pool)) {
37 		if (bip->bip_vec)
38 			bvec_free(&bs->bvec_integrity_pool, bip->bip_vec,
39 				  bip->bip_max_vcnt);
40 		mempool_free(bip, &bs->bio_integrity_pool);
41 	} else {
42 		kfree(bip);
43 	}
44 	bio->bi_integrity = NULL;
45 	bio->bi_opf &= ~REQ_INTEGRITY;
46 }
47 
/**
 * bio_integrity_alloc - Allocate integrity payload and attach it to bio
 * @bio:	bio to attach integrity metadata to
 * @gfp_mask:	Memory allocation mask
 * @nr_vecs:	Number of integrity metadata scatter-gather elements
 *
 * Description: This function prepares a bio for attaching integrity
 * metadata.  nr_vecs specifies the maximum number of pages containing
 * integrity metadata that can be attached.
 *
 * Return: the new payload on success, ERR_PTR(-ENOMEM) or
 * ERR_PTR(-EOPNOTSUPP) on failure.
 */
struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
						  gfp_t gfp_mask,
						  unsigned int nr_vecs)
{
	struct bio_integrity_payload *bip;
	struct bio_set *bs = bio->bi_pool;
	unsigned inline_vecs;

	/* Integrity payloads and inline encryption are mutually exclusive. */
	if (WARN_ON_ONCE(bio_has_crypt_ctx(bio)))
		return ERR_PTR(-EOPNOTSUPP);

	if (!bs || !mempool_initialized(&bs->bio_integrity_pool)) {
		/*
		 * No backing bio_set pool: size the kmalloc so that all
		 * requested vectors fit inline after the payload struct.
		 */
		bip = kmalloc(struct_size(bip, bip_inline_vecs, nr_vecs), gfp_mask);
		inline_vecs = nr_vecs;
	} else {
		/* Mempool slabs embed only BIO_INLINE_VECS inline vectors. */
		bip = mempool_alloc(&bs->bio_integrity_pool, gfp_mask);
		inline_vecs = BIO_INLINE_VECS;
	}

	if (unlikely(!bip))
		return ERR_PTR(-ENOMEM);

	memset(bip, 0, sizeof(*bip));

	/* always report as many vecs as asked explicitly, not inline vecs */
	bip->bip_max_vcnt = nr_vecs;
	if (nr_vecs > inline_vecs) {
		/*
		 * Only reachable on the mempool path (the kmalloc path sets
		 * inline_vecs == nr_vecs), so bs is valid here.
		 */
		bip->bip_vec = bvec_alloc(&bs->bvec_integrity_pool,
					  &bip->bip_max_vcnt, gfp_mask);
		if (!bip->bip_vec)
			goto err;
	} else if (nr_vecs) {
		bip->bip_vec = bip->bip_inline_vecs;
	}

	bip->bip_bio = bio;
	bio->bi_integrity = bip;
	bio->bi_opf |= REQ_INTEGRITY;

	return bip;
err:
	/* Return bip through the same allocator that produced it. */
	if (bs && mempool_initialized(&bs->bio_integrity_pool))
		mempool_free(bip, &bs->bio_integrity_pool);
	else
		kfree(bip);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(bio_integrity_alloc);
106 
107 static void bio_integrity_unpin_bvec(struct bio_vec *bv, int nr_vecs,
108 				     bool dirty)
109 {
110 	int i;
111 
112 	for (i = 0; i < nr_vecs; i++) {
113 		if (dirty && !PageCompound(bv[i].bv_page))
114 			set_page_dirty_lock(bv[i].bv_page);
115 		unpin_user_page(bv[i].bv_page);
116 	}
117 }
118 
119 static void bio_integrity_uncopy_user(struct bio_integrity_payload *bip)
120 {
121 	unsigned short orig_nr_vecs = bip->bip_max_vcnt - 1;
122 	struct bio_vec *orig_bvecs = &bip->bip_vec[1];
123 	struct bio_vec *bounce_bvec = &bip->bip_vec[0];
124 	size_t bytes = bounce_bvec->bv_len;
125 	struct iov_iter orig_iter;
126 	int ret;
127 
128 	iov_iter_bvec(&orig_iter, ITER_DEST, orig_bvecs, orig_nr_vecs, bytes);
129 	ret = copy_to_iter(bvec_virt(bounce_bvec), bytes, &orig_iter);
130 	WARN_ON_ONCE(ret != bytes);
131 
132 	bio_integrity_unpin_bvec(orig_bvecs, orig_nr_vecs, true);
133 }
134 
135 /**
136  * bio_integrity_unmap_user - Unmap user integrity payload
137  * @bio:	bio containing bip to be unmapped
138  *
139  * Unmap the user mapped integrity portion of a bio.
140  */
141 void bio_integrity_unmap_user(struct bio *bio)
142 {
143 	struct bio_integrity_payload *bip = bio_integrity(bio);
144 
145 	if (bip->bip_flags & BIP_COPY_USER) {
146 		if (bio_data_dir(bio) == READ)
147 			bio_integrity_uncopy_user(bip);
148 		kfree(bvec_virt(bip->bip_vec));
149 		return;
150 	}
151 
152 	bio_integrity_unpin_bvec(bip->bip_vec, bip->bip_max_vcnt,
153 			bio_data_dir(bio) == READ);
154 }
155 
/**
 * bio_integrity_add_page - Attach integrity metadata
 * @bio:	bio to update
 * @page:	page containing integrity metadata
 * @len:	number of bytes of integrity metadata in page
 * @offset:	start offset within page
 *
 * Description: Attach a page containing integrity metadata to bio.
 *
 * Return: @len when the bytes were added (merged or as a new segment),
 * 0 when the segment cannot be added within the queue's limits.
 */
int bio_integrity_add_page(struct bio *bio, struct page *page,
			   unsigned int len, unsigned int offset)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip->bip_vcnt > 0) {
		struct bio_vec *bv = &bip->bip_vec[bip->bip_vcnt - 1];
		bool same_page = false;

		/* Try merging into the last bvec first: no new segment used. */
		if (bvec_try_merge_hw_page(q, bv, page, len, offset,
					   &same_page)) {
			bip->bip_iter.bi_size += len;
			return len;
		}

		/* A new segment must fit both payload and queue limits. */
		if (bip->bip_vcnt >=
		    min(bip->bip_max_vcnt, queue_max_integrity_segments(q)))
			return 0;

		/*
		 * If the queue doesn't support SG gaps and adding this segment
		 * would create a gap, disallow it.
		 */
		if (bvec_gap_to_prev(&q->limits, bv, offset))
			return 0;
	}

	bvec_set_page(&bip->bip_vec[bip->bip_vcnt], page, len, offset);
	bip->bip_vcnt++;
	bip->bip_iter.bi_size += len;

	return len;
}
EXPORT_SYMBOL(bio_integrity_add_page);
200 
/*
 * Bounce the user integrity payload through a kernel buffer.
 *
 * For writes (@direction == ITER_SOURCE) the user data described by @bvec
 * is copied into the bounce buffer up front and the user pages are
 * unpinned.  For reads, the original bvecs are stashed after the bounce
 * bvec (slot 0) so that completion can copy the result back to the user
 * pages and unpin them (see bio_integrity_uncopy_user()).
 *
 * Returns 0 on success or a negative errno.
 */
static int bio_integrity_copy_user(struct bio *bio, struct bio_vec *bvec,
				   int nr_vecs, unsigned int len,
				   unsigned int direction)
{
	bool write = direction == ITER_SOURCE;
	struct bio_integrity_payload *bip;
	struct iov_iter iter;
	void *buf;
	int ret;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (write) {
		iov_iter_bvec(&iter, direction, bvec, nr_vecs, len);
		if (!copy_from_iter_full(buf, len, &iter)) {
			ret = -EFAULT;
			goto free_buf;
		}

		/* Only the bounce buffer itself needs a vector slot. */
		bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
	} else {
		/* Zero-fill so a short read never exposes stale kernel memory. */
		memset(buf, 0, len);

		/*
		 * We need to preserve the original bvec and the number of vecs
		 * in it for completion handling
		 */
		bip = bio_integrity_alloc(bio, GFP_KERNEL, nr_vecs + 1);
	}

	if (IS_ERR(bip)) {
		ret = PTR_ERR(bip);
		goto free_buf;
	}

	if (write)
		bio_integrity_unpin_bvec(bvec, nr_vecs, false);
	else
		memcpy(&bip->bip_vec[1], bvec, nr_vecs * sizeof(*bvec));

	ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
				     offset_in_page(buf));
	if (ret != len) {
		ret = -ENOMEM;
		goto free_bip;
	}

	bip->bip_flags |= BIP_COPY_USER;
	/*
	 * NOTE(review): overrides the vcnt set by bio_integrity_add_page();
	 * presumably records the original user vec count so a later bio
	 * split sizes its clone correctly — confirm against the split path.
	 */
	bip->bip_vcnt = nr_vecs;
	return 0;
free_bip:
	bio_integrity_free(bio);
free_buf:
	kfree(buf);
	return ret;
}
259 
260 static int bio_integrity_init_user(struct bio *bio, struct bio_vec *bvec,
261 				   int nr_vecs, unsigned int len)
262 {
263 	struct bio_integrity_payload *bip;
264 
265 	bip = bio_integrity_alloc(bio, GFP_KERNEL, nr_vecs);
266 	if (IS_ERR(bip))
267 		return PTR_ERR(bip);
268 
269 	memcpy(bip->bip_vec, bvec, nr_vecs * sizeof(*bvec));
270 	bip->bip_iter.bi_size = len;
271 	bip->bip_vcnt = nr_vecs;
272 	return 0;
273 }
274 
/*
 * Build coalesced bio_vecs from a pinned page array.
 *
 * Runs of physically contiguous pages belonging to the same folio are
 * merged into a single bvec; the per-page pins of the merged tail pages
 * are dropped, leaving one pin (on the run's first page) per bvec.
 *
 * Returns the number of bio_vecs written to @bvec.
 */
static unsigned int bvec_from_pages(struct bio_vec *bvec, struct page **pages,
				    int nr_vecs, ssize_t bytes, ssize_t offset)
{
	unsigned int nr_bvecs = 0;
	int i, j;

	for (i = 0; i < nr_vecs; i = j) {
		/* First page of a run: bounded by remaining bytes and offset. */
		size_t size = min_t(size_t, bytes, PAGE_SIZE - offset);
		struct folio *folio = page_folio(pages[i]);

		bytes -= size;
		for (j = i + 1; j < nr_vecs; j++) {
			size_t next = min_t(size_t, PAGE_SIZE, bytes);

			if (page_folio(pages[j]) != folio ||
			    pages[j] != pages[j - 1] + 1)
				break;
			/* Merged into the run; its individual pin is dropped. */
			unpin_user_page(pages[j]);
			size += next;
			bytes -= next;
		}

		bvec_set_page(&bvec[nr_bvecs], pages[i], size, offset);
		/* Only the very first bvec can start at a non-zero offset. */
		offset = 0;
		nr_bvecs++;
	}

	return nr_bvecs;
}
304 
/**
 * bio_integrity_map_user - Map user-space integrity data into @bio
 * @bio:	bio to attach the integrity payload to
 * @iter:	user iterator covering the integrity bytes
 *
 * Pin the user pages backing @iter and attach them as @bio's integrity
 * payload.  Falls back to a kernel bounce buffer when the buffer is not
 * DMA-aligned for the queue or needs more segments than the queue allows.
 *
 * Return: 0 on success or a negative errno.
 */
int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	unsigned int align = blk_lim_dma_alignment_and_pad(&q->limits);
	struct page *stack_pages[UIO_FASTIOV], **pages = stack_pages;
	struct bio_vec stack_vec[UIO_FASTIOV], *bvec = stack_vec;
	size_t offset, bytes = iter->count;
	unsigned int direction, nr_bvecs;
	int ret, nr_vecs;
	bool copy;

	if (bio_integrity(bio))
		return -EINVAL;
	if (bytes >> SECTOR_SHIFT > queue_max_hw_sectors(q))
		return -E2BIG;

	if (bio_data_dir(bio) == READ)
		direction = ITER_DEST;
	else
		direction = ITER_SOURCE;

	/* +1 so we can detect iterators needing more than BIO_MAX_VECS. */
	nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS + 1);
	if (nr_vecs > BIO_MAX_VECS)
		return -E2BIG;
	if (nr_vecs > UIO_FASTIOV) {
		bvec = kcalloc(nr_vecs, sizeof(*bvec), GFP_KERNEL);
		if (!bvec)
			return -ENOMEM;
		/* NULL asks iov_iter_extract_pages() to allocate the array. */
		pages = NULL;
	}

	/* Misaligned user buffers must be bounced through a kernel copy. */
	copy = !iov_iter_is_aligned(iter, align, align);
	ret = iov_iter_extract_pages(iter, &pages, bytes, nr_vecs, 0, &offset);
	if (unlikely(ret < 0))
		goto free_bvec;

	nr_bvecs = bvec_from_pages(bvec, pages, nr_vecs, bytes, offset);
	/* The page array is no longer needed once bvecs are built. */
	if (pages != stack_pages)
		kvfree(pages);
	/* Too many segments for the queue: fall back to a bounce copy. */
	if (nr_bvecs > queue_max_integrity_segments(q))
		copy = true;

	if (copy)
		ret = bio_integrity_copy_user(bio, bvec, nr_bvecs, bytes,
					      direction);
	else
		ret = bio_integrity_init_user(bio, bvec, nr_bvecs, bytes);
	if (ret)
		goto release_pages;
	if (bvec != stack_vec)
		kfree(bvec);

	return 0;

release_pages:
	bio_integrity_unpin_bvec(bvec, nr_bvecs, false);
free_bvec:
	if (bvec != stack_vec)
		kfree(bvec);
	return ret;
}
366 
367 static void bio_uio_meta_to_bip(struct bio *bio, struct uio_meta *meta)
368 {
369 	struct bio_integrity_payload *bip = bio_integrity(bio);
370 
371 	if (meta->flags & IO_INTEGRITY_CHK_GUARD)
372 		bip->bip_flags |= BIP_CHECK_GUARD;
373 	if (meta->flags & IO_INTEGRITY_CHK_APPTAG)
374 		bip->bip_flags |= BIP_CHECK_APPTAG;
375 	if (meta->flags & IO_INTEGRITY_CHK_REFTAG)
376 		bip->bip_flags |= BIP_CHECK_REFTAG;
377 
378 	bip->app_tag = meta->app_tag;
379 }
380 
381 int bio_integrity_map_iter(struct bio *bio, struct uio_meta *meta)
382 {
383 	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
384 	unsigned int integrity_bytes;
385 	int ret;
386 	struct iov_iter it;
387 
388 	if (!bi)
389 		return -EINVAL;
390 	/*
391 	 * original meta iterator can be bigger.
392 	 * process integrity info corresponding to current data buffer only.
393 	 */
394 	it = meta->iter;
395 	integrity_bytes = bio_integrity_bytes(bi, bio_sectors(bio));
396 	if (it.count < integrity_bytes)
397 		return -EINVAL;
398 
399 	/* should fit into two bytes */
400 	BUILD_BUG_ON(IO_INTEGRITY_VALID_FLAGS >= (1 << 16));
401 
402 	if (meta->flags && (meta->flags & ~IO_INTEGRITY_VALID_FLAGS))
403 		return -EINVAL;
404 
405 	it.count = integrity_bytes;
406 	ret = bio_integrity_map_user(bio, &it);
407 	if (!ret) {
408 		bio_uio_meta_to_bip(bio, meta);
409 		bip_set_seed(bio_integrity(bio), meta->seed);
410 		iov_iter_advance(&meta->iter, integrity_bytes);
411 		meta->seed += bio_integrity_intervals(bi, bio_sectors(bio));
412 	}
413 	return ret;
414 }
415 
/**
 * bio_integrity_prep - Prepare bio for integrity I/O
 * @bio:	bio to prepare
 *
 * Description:  Checks if the bio already has an integrity payload attached.
 * If it does, the payload has been generated by another kernel subsystem,
 * and we just pass it through. Otherwise allocates integrity payload.
 * The bio must have data direction, target device and start sector set prior
 * to calling.  In the WRITE case, integrity metadata will be generated using
 * the block device's integrity function.  In the READ case, the buffer
 * will be prepared for DMA and a suitable end_io handler set up.
 */
bool bio_integrity_prep(struct bio *bio)
{
	struct bio_integrity_payload *bip;
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
	unsigned int len;
	void *buf;
	gfp_t gfp = GFP_NOIO;

	/* No integrity profile registered for this disk. */
	if (!bi)
		return true;

	/* Empty bios carry no data and hence no protection information. */
	if (!bio_sectors(bio))
		return true;

	/* Already protected? */
	if (bio_integrity(bio))
		return true;

	switch (bio_op(bio)) {
	case REQ_OP_READ:
		if (bi->flags & BLK_INTEGRITY_NOVERIFY)
			return true;
		break;
	case REQ_OP_WRITE:
		if (bi->flags & BLK_INTEGRITY_NOGENERATE)
			return true;

		/*
		 * Zero the memory allocated to not leak uninitialized kernel
		 * memory to disk for non-integrity metadata where nothing else
		 * initializes the memory.
		 */
		if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE)
			gfp |= __GFP_ZERO;
		break;
	default:
		/* Other operations carry no integrity metadata. */
		return true;
	}

	/* Allocate kernel buffer for protection data */
	len = bio_integrity_bytes(bi, bio_sectors(bio));
	buf = kmalloc(len, gfp);
	if (unlikely(buf == NULL)) {
		goto err_end_io;
	}

	bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
	if (IS_ERR(bip)) {
		kfree(buf);
		goto err_end_io;
	}

	bip->bip_flags |= BIP_BLOCK_INTEGRITY;
	bip_set_seed(bip, bio->bi_iter.bi_sector);

	if (bi->csum_type == BLK_INTEGRITY_CSUM_IP)
		bip->bip_flags |= BIP_IP_CHECKSUM;

	/* describe what tags to check in payload */
	if (bi->csum_type)
		bip->bip_flags |= BIP_CHECK_GUARD;
	if (bi->flags & BLK_INTEGRITY_REF_TAG)
		bip->bip_flags |= BIP_CHECK_REFTAG;
	if (bio_integrity_add_page(bio, virt_to_page(buf), len,
			offset_in_page(buf)) < len) {
		printk(KERN_ERR "could not attach integrity payload\n");
		goto err_end_io;
	}

	/* Auto-generate integrity metadata if this is a write */
	if (bio_data_dir(bio) == WRITE)
		blk_integrity_generate(bio);
	else
		/*
		 * Snapshot the data iterator for the verify worker; by
		 * completion time bi_iter will have been advanced.
		 */
		bip->bio_iter = bio->bi_iter;
	return true;

err_end_io:
	bio->bi_status = BLK_STS_RESOURCE;
	bio_endio(bio);
	return false;
}
EXPORT_SYMBOL(bio_integrity_prep);
510 
/**
 * bio_integrity_verify_fn - Integrity I/O completion worker
 * @work:	Work struct stored in bio to be verified
 *
 * Description: This workqueue function is called to complete a READ
 * request.  The function verifies the transferred integrity metadata
 * and then calls the original bio end_io function.
 */
static void bio_integrity_verify_fn(struct work_struct *work)
{
	struct bio_integrity_payload *bip =
		container_of(work, struct bio_integrity_payload, bip_work);
	struct bio *bio = bip->bip_bio;

	blk_integrity_verify(bio);

	/* Free the kernel protection buffer and the payload itself. */
	kfree(bvec_virt(bip->bip_vec));
	bio_integrity_free(bio);
	bio_endio(bio);
}
531 
532 /**
533  * __bio_integrity_endio - Integrity I/O completion function
534  * @bio:	Protected bio
535  *
536  * Description: Completion for integrity I/O
537  *
538  * Normally I/O completion is done in interrupt context.  However,
539  * verifying I/O integrity is a time-consuming task which must be run
540  * in process context.	This function postpones completion
541  * accordingly.
542  */
543 bool __bio_integrity_endio(struct bio *bio)
544 {
545 	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
546 	struct bio_integrity_payload *bip = bio_integrity(bio);
547 
548 	if (bio_op(bio) == REQ_OP_READ && !bio->bi_status && bi->csum_type) {
549 		INIT_WORK(&bip->bip_work, bio_integrity_verify_fn);
550 		queue_work(kintegrityd_wq, &bip->bip_work);
551 		return false;
552 	}
553 
554 	kfree(bvec_virt(bip->bip_vec));
555 	bio_integrity_free(bio);
556 	return true;
557 }
558 
559 /**
560  * bio_integrity_advance - Advance integrity vector
561  * @bio:	bio whose integrity vector to update
562  * @bytes_done:	number of data bytes that have been completed
563  *
564  * Description: This function calculates how many integrity bytes the
565  * number of completed data bytes correspond to and advances the
566  * integrity vector accordingly.
567  */
568 void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
569 {
570 	struct bio_integrity_payload *bip = bio_integrity(bio);
571 	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
572 	unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);
573 
574 	bip->bip_iter.bi_sector += bio_integrity_intervals(bi, bytes_done >> 9);
575 	bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes);
576 }
577 
578 /**
579  * bio_integrity_trim - Trim integrity vector
580  * @bio:	bio whose integrity vector to update
581  *
582  * Description: Used to trim the integrity vector in a cloned bio.
583  */
584 void bio_integrity_trim(struct bio *bio)
585 {
586 	struct bio_integrity_payload *bip = bio_integrity(bio);
587 	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
588 
589 	bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
590 }
591 EXPORT_SYMBOL(bio_integrity_trim);
592 
593 /**
594  * bio_integrity_clone - Callback for cloning bios with integrity metadata
595  * @bio:	New bio
596  * @bio_src:	Original bio
597  * @gfp_mask:	Memory allocation mask
598  *
599  * Description:	Called to allocate a bip when cloning a bio
600  */
601 int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
602 			gfp_t gfp_mask)
603 {
604 	struct bio_integrity_payload *bip_src = bio_integrity(bio_src);
605 	struct bio_integrity_payload *bip;
606 
607 	BUG_ON(bip_src == NULL);
608 
609 	bip = bio_integrity_alloc(bio, gfp_mask, 0);
610 	if (IS_ERR(bip))
611 		return PTR_ERR(bip);
612 
613 	bip->bip_vec = bip_src->bip_vec;
614 	bip->bip_iter = bip_src->bip_iter;
615 	bip->bip_flags = bip_src->bip_flags & BIP_CLONE_FLAGS;
616 	bip->app_tag = bip_src->app_tag;
617 
618 	return 0;
619 }
620 
621 int bioset_integrity_create(struct bio_set *bs, int pool_size)
622 {
623 	if (mempool_initialized(&bs->bio_integrity_pool))
624 		return 0;
625 
626 	if (mempool_init_slab_pool(&bs->bio_integrity_pool,
627 				   pool_size, bip_slab))
628 		return -1;
629 
630 	if (biovec_init_pool(&bs->bvec_integrity_pool, pool_size)) {
631 		mempool_exit(&bs->bio_integrity_pool);
632 		return -1;
633 	}
634 
635 	return 0;
636 }
637 EXPORT_SYMBOL(bioset_integrity_create);
638 
/* Tear down the integrity mempools set up by bioset_integrity_create(). */
void bioset_integrity_free(struct bio_set *bs)
{
	mempool_exit(&bs->bio_integrity_pool);
	mempool_exit(&bs->bvec_integrity_pool);
}
644 
645 void __init bio_integrity_init(void)
646 {
647 	/*
648 	 * kintegrityd won't block much but may burn a lot of CPU cycles.
649 	 * Make it highpri CPU intensive wq with max concurrency of 1.
650 	 */
651 	kintegrityd_wq = alloc_workqueue("kintegrityd", WQ_MEM_RECLAIM |
652 					 WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1);
653 	if (!kintegrityd_wq)
654 		panic("Failed to create kintegrityd\n");
655 
656 	bip_slab = kmem_cache_create("bio_integrity_payload",
657 				     sizeof(struct bio_integrity_payload) +
658 				     sizeof(struct bio_vec) * BIO_INLINE_VECS,
659 				     0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
660 }
661