xref: /linux/block/bio-integrity.c (revision 6dfafbd0299a60bfb5d5e277fdf100037c7ded07)
// SPDX-License-Identifier: GPL-2.0
/*
 * bio-integrity.c - bio data integrity extensions
 *
 * Copyright (C) 2007, 2008, 2009 Oracle Corporation
 * Written by: Martin K. Petersen <martin.petersen@oracle.com>
 */

#include <linux/blk-integrity.h>
#include "blk.h"

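/*
 * Allocation unit for a bio integrity payload: the payload itself followed by
 * the inline bio_vec array it uses, so both come from a single allocation.
 */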
struct bio_integrity_alloc {
	struct bio_integrity_payload	bip;
	struct bio_vec			bvecs[];
};

static mempool_t integrity_buf_pool;

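/**
 * bio_integrity_alloc_buf - Allocate a kernel buffer for integrity metadata
 * @bio:	bio to allocate the buffer for
 * @zero_buffer:	clear the buffer before use
 *
 * Attach a buffer large enough to hold the integrity metadata for @bio as the
 * single vector of its integrity payload.  A plain kmalloc() is tried first
 * without blocking for reclaim; if that fails, a page is taken from the
 * mempool and BIP_MEMPOOL is set so the completion path knows where to
 * return it.
 */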
void bio_integrity_alloc_buf(struct bio *bio, bool zero_buffer)
{
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
	struct bio_integrity_payload *bip = bio_integrity(bio);
	unsigned int len = bio_integrity_bytes(bi, bio_sectors(bio));
	gfp_t gfp = GFP_NOIO | (zero_buffer ? __GFP_ZERO : 0);
	void *buf;

	buf = kmalloc(len, (gfp & ~__GFP_DIRECT_RECLAIM) |
			__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN);
	if (unlikely(!buf)) {
		struct page *page;

		page = mempool_alloc(&integrity_buf_pool, GFP_NOFS);
		if (zero_buffer)
			memset(page_address(page), 0, len);
		bvec_set_page(&bip->bip_vec[0], page, len, 0);
		bip->bip_flags |= BIP_MEMPOOL;
	} else {
		bvec_set_page(&bip->bip_vec[0], virt_to_page(buf), len,
				offset_in_page(buf));
	}

	bip->bip_vcnt = 1;
	bip->bip_iter.bi_size = len;
}

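/**
 * bio_integrity_free_buf - Free a buffer attached by bio_integrity_alloc_buf()
 * @bip:	integrity payload holding the buffer
 *
 * Return the buffer page to the mempool if it came from there, otherwise
 * kfree() it.
 */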
void bio_integrity_free_buf(struct bio_integrity_payload *bip)
{
	struct bio_vec *bv = &bip->bip_vec[0];

	if (bip->bip_flags & BIP_MEMPOOL)
		mempool_free(bv->bv_page, &integrity_buf_pool);
	else
		kfree(bvec_virt(bv));
}

/**
 * bio_integrity_free - Free bio integrity payload
 * @bio:	bio containing bip to be freed
 *
 * Description: Free the integrity portion of a bio.
 */
void bio_integrity_free(struct bio *bio)
{
	kfree(bio_integrity(bio));
	bio->bi_integrity = NULL;
	bio->bi_opf &= ~REQ_INTEGRITY;
}

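/**
 * bio_integrity_init - Attach an externally allocated integrity payload
 * @bio:	bio to attach the payload to
 * @bip:	integrity payload to initialize
 * @bvecs:	integrity bio_vec array to use, may be NULL if @nr_vecs is 0
 * @nr_vecs:	number of elements in @bvecs
 *
 * Zero @bip, wire up its vector array and mark @bio as carrying integrity
 * metadata.
 */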
void bio_integrity_init(struct bio *bio, struct bio_integrity_payload *bip,
		struct bio_vec *bvecs, unsigned int nr_vecs)
{
	memset(bip, 0, sizeof(*bip));
	bip->bip_max_vcnt = nr_vecs;
	if (nr_vecs)
		bip->bip_vec = bvecs;

	bio->bi_integrity = bip;
	bio->bi_opf |= REQ_INTEGRITY;
}

/**
 * bio_integrity_alloc - Allocate integrity payload and attach it to bio
 * @bio:	bio to attach integrity metadata to
 * @gfp_mask:	Memory allocation mask
 * @nr_vecs:	Number of integrity metadata scatter-gather elements
 *
 * Description: This function prepares a bio for attaching integrity
 * metadata.  nr_vecs specifies the maximum number of pages containing
 * integrity metadata that can be attached.
 */
struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
						  gfp_t gfp_mask,
						  unsigned int nr_vecs)
{
	struct bio_integrity_alloc *bia;

	if (WARN_ON_ONCE(bio_has_crypt_ctx(bio)))
		return ERR_PTR(-EOPNOTSUPP);

	bia = kmalloc(struct_size(bia, bvecs, nr_vecs), gfp_mask);
	if (unlikely(!bia))
		return ERR_PTR(-ENOMEM);
	bio_integrity_init(bio, &bia->bip, bia->bvecs, nr_vecs);
	return &bia->bip;
}
EXPORT_SYMBOL(bio_integrity_alloc);

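/* Release the user-page pins held by the first nr_vecs bio_vecs. */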
static void bio_integrity_unpin_bvec(struct bio_vec *bv, int nr_vecs)
{
	int i;

	for (i = 0; i < nr_vecs; i++)
		unpin_user_page(bv[i].bv_page);
}

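/*
 * Copy the integrity data read into the bounce buffer (bip_vec[0]) back to
 * the original user bvecs stashed behind it, then unpin the user pages.
 */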
static void bio_integrity_uncopy_user(struct bio_integrity_payload *bip)
{
	unsigned short orig_nr_vecs = bip->bip_max_vcnt - 1;
	struct bio_vec *orig_bvecs = &bip->bip_vec[1];
	struct bio_vec *bounce_bvec = &bip->bip_vec[0];
	size_t bytes = bounce_bvec->bv_len;
	struct iov_iter orig_iter;
	int ret;

	iov_iter_bvec(&orig_iter, ITER_DEST, orig_bvecs, orig_nr_vecs, bytes);
	ret = copy_to_iter(bvec_virt(bounce_bvec), bytes, &orig_iter);
	WARN_ON_ONCE(ret != bytes);

	bio_integrity_unpin_bvec(orig_bvecs, orig_nr_vecs);
}

/**
 * bio_integrity_unmap_user - Unmap user integrity payload
 * @bio:	bio containing bip to be unmapped
 *
 * Unmap the user mapped integrity portion of a bio.
 */
void bio_integrity_unmap_user(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip->bip_flags & BIP_COPY_USER) {
		if (bio_data_dir(bio) == READ)
			bio_integrity_uncopy_user(bip);
		kfree(bvec_virt(bip->bip_vec));
		return;
	}

	bio_integrity_unpin_bvec(bip->bip_vec, bip->bip_max_vcnt);
}

/**
 * bio_integrity_add_page - Attach integrity metadata
 * @bio:	bio to update
 * @page:	page containing integrity metadata
 * @len:	number of bytes of integrity metadata in page
 * @offset:	start offset within page
 *
 * Description: Attach a page containing integrity metadata to bio.
 */
int bio_integrity_add_page(struct bio *bio, struct page *page,
			   unsigned int len, unsigned int offset)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip->bip_vcnt > 0) {
		struct bio_vec *bv = &bip->bip_vec[bip->bip_vcnt - 1];

		if (!zone_device_pages_have_same_pgmap(bv->bv_page, page))
			return 0;

		if (bvec_try_merge_hw_page(q, bv, page, len, offset)) {
			bip->bip_iter.bi_size += len;
			return len;
		}

		if (bip->bip_vcnt >=
		    min(bip->bip_max_vcnt, queue_max_integrity_segments(q)))
			return 0;

		/*
		 * If the queue doesn't support SG gaps and adding this segment
		 * would create a gap, disallow it.
		 */
		if (bvec_gap_to_prev(&q->limits, bv, offset))
			return 0;
	}

	bvec_set_page(&bip->bip_vec[bip->bip_vcnt], page, len, offset);
	bip->bip_vcnt++;
	bip->bip_iter.bi_size += len;

	return len;
}
EXPORT_SYMBOL(bio_integrity_add_page);

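/*
 * Bounce the user integrity buffer: for writes, copy it into a freshly
 * allocated kernel buffer and release the user pages right away; for reads,
 * keep the pinned user bvecs behind the bounce vector so the data can be
 * copied back and the pages unpinned on completion.
 */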
static int bio_integrity_copy_user(struct bio *bio, struct bio_vec *bvec,
				   int nr_vecs, unsigned int len)
{
	bool write = op_is_write(bio_op(bio));
	struct bio_integrity_payload *bip;
	struct iov_iter iter;
	void *buf;
	int ret;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (write) {
		iov_iter_bvec(&iter, ITER_SOURCE, bvec, nr_vecs, len);
		if (!copy_from_iter_full(buf, len, &iter)) {
			ret = -EFAULT;
			goto free_buf;
		}

		bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
	} else {
		memset(buf, 0, len);

		/*
		 * We need to preserve the original bvec and the number of vecs
		 * in it for completion handling
		 */
		bip = bio_integrity_alloc(bio, GFP_KERNEL, nr_vecs + 1);
	}

	if (IS_ERR(bip)) {
		ret = PTR_ERR(bip);
		goto free_buf;
	}

	if (write)
		bio_integrity_unpin_bvec(bvec, nr_vecs);
	else
		memcpy(&bip->bip_vec[1], bvec, nr_vecs * sizeof(*bvec));

	ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
				     offset_in_page(buf));
	if (ret != len) {
		ret = -ENOMEM;
		goto free_bip;
	}

	bip->bip_flags |= BIP_COPY_USER;
	bip->bip_vcnt = nr_vecs;
	return 0;
free_bip:
	bio_integrity_free(bio);
free_buf:
	kfree(buf);
	return ret;
}

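/* Attach the pinned user pages to the bio directly, without a bounce buffer. */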
static int bio_integrity_init_user(struct bio *bio, struct bio_vec *bvec,
				   int nr_vecs, unsigned int len)
{
	struct bio_integrity_payload *bip;

	bip = bio_integrity_alloc(bio, GFP_KERNEL, nr_vecs);
	if (IS_ERR(bip))
		return PTR_ERR(bip);

	memcpy(bip->bip_vec, bvec, nr_vecs * sizeof(*bvec));
	bip->bip_iter.bi_size = len;
	bip->bip_vcnt = nr_vecs;
	return 0;
}

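/*
 * Collapse the extracted page array into bio_vecs, merging pages that are
 * physically contiguous within the same folio and dropping the extra pin on
 * each merged page.  Sets *is_p2p when any of the pages is PCI P2PDMA memory.
 */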
static unsigned int bvec_from_pages(struct bio_vec *bvec, struct page **pages,
				    int nr_vecs, ssize_t bytes, ssize_t offset,
				    bool *is_p2p)
{
	unsigned int nr_bvecs = 0;
	int i, j;

	for (i = 0; i < nr_vecs; i = j) {
		size_t size = min_t(size_t, bytes, PAGE_SIZE - offset);
		struct folio *folio = page_folio(pages[i]);

		bytes -= size;
		for (j = i + 1; j < nr_vecs; j++) {
			size_t next = min_t(size_t, PAGE_SIZE, bytes);

			if (page_folio(pages[j]) != folio ||
			    pages[j] != pages[j - 1] + 1)
				break;
			unpin_user_page(pages[j]);
			size += next;
			bytes -= next;
		}

		if (is_pci_p2pdma_page(pages[i]))
			*is_p2p = true;

		bvec_set_page(&bvec[nr_bvecs], pages[i], size, offset);
		offset = 0;
		nr_bvecs++;
	}

	return nr_bvecs;
}

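/**
 * bio_integrity_map_user - Map user-space integrity metadata into a bio
 * @bio:	bio to add the integrity metadata to
 * @iter:	iterator pointing at the user buffer
 *
 * Pin the user pages backing @iter and attach them as the integrity payload
 * of @bio.  The buffer is bounced through a kernel copy instead when it is
 * not sufficiently aligned for the queue or needs more segments than the
 * queue supports.  Returns 0 on success or a negative error code.
 */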
int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	struct page *stack_pages[UIO_FASTIOV], **pages = stack_pages;
	struct bio_vec stack_vec[UIO_FASTIOV], *bvec = stack_vec;
	iov_iter_extraction_t extraction_flags = 0;
	size_t offset, bytes = iter->count;
	bool copy, is_p2p = false;
	unsigned int nr_bvecs;
	int ret, nr_vecs;

	if (bio_integrity(bio))
		return -EINVAL;
	if (bytes >> SECTOR_SHIFT > queue_max_hw_sectors(q))
		return -E2BIG;

	nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS + 1);
	if (nr_vecs > BIO_MAX_VECS)
		return -E2BIG;
	if (nr_vecs > UIO_FASTIOV) {
		bvec = kcalloc(nr_vecs, sizeof(*bvec), GFP_KERNEL);
		if (!bvec)
			return -ENOMEM;
		pages = NULL;
	}

	copy = iov_iter_alignment(iter) &
			blk_lim_dma_alignment_and_pad(&q->limits);

	if (blk_queue_pci_p2pdma(q))
		extraction_flags |= ITER_ALLOW_P2PDMA;

	ret = iov_iter_extract_pages(iter, &pages, bytes, nr_vecs,
					extraction_flags, &offset);
	if (unlikely(ret < 0))
		goto free_bvec;

	nr_bvecs = bvec_from_pages(bvec, pages, nr_vecs, bytes, offset,
				   &is_p2p);
	if (pages != stack_pages)
		kvfree(pages);
	if (nr_bvecs > queue_max_integrity_segments(q))
		copy = true;
	if (is_p2p)
		bio->bi_opf |= REQ_NOMERGE;

	if (copy)
		ret = bio_integrity_copy_user(bio, bvec, nr_bvecs, bytes);
	else
		ret = bio_integrity_init_user(bio, bvec, nr_bvecs, bytes);
	if (ret)
		goto release_pages;
	if (bvec != stack_vec)
		kfree(bvec);

	return 0;

release_pages:
	bio_integrity_unpin_bvec(bvec, nr_bvecs);
free_bvec:
	if (bvec != stack_vec)
		kfree(bvec);
	return ret;
}

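/* Carry the uio_meta protection check flags and app tag over to the bip. */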
static void bio_uio_meta_to_bip(struct bio *bio, struct uio_meta *meta)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (meta->flags & IO_INTEGRITY_CHK_GUARD)
		bip->bip_flags |= BIP_CHECK_GUARD;
	if (meta->flags & IO_INTEGRITY_CHK_APPTAG)
		bip->bip_flags |= BIP_CHECK_APPTAG;
	if (meta->flags & IO_INTEGRITY_CHK_REFTAG)
		bip->bip_flags |= BIP_CHECK_REFTAG;

	bip->app_tag = meta->app_tag;
}

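/**
 * bio_integrity_map_iter - Map the user metadata described by a uio_meta
 * @bio:	bio to add the integrity metadata to
 * @meta:	user metadata descriptor, advanced past the bytes consumed
 *
 * Map the part of @meta->iter that covers the data in @bio and carry the
 * requested protection checks and seed over to the integrity payload.
 */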
int bio_integrity_map_iter(struct bio *bio, struct uio_meta *meta)
{
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
	unsigned int integrity_bytes;
	int ret;
	struct iov_iter it;

	if (!bi)
		return -EINVAL;
	/*
	 * The original meta iterator can be bigger than the current bio, so
	 * only process the integrity info that corresponds to the current
	 * data buffer.
	 */
	it = meta->iter;
	integrity_bytes = bio_integrity_bytes(bi, bio_sectors(bio));
	if (it.count < integrity_bytes)
		return -EINVAL;

	/* should fit into two bytes */
	BUILD_BUG_ON(IO_INTEGRITY_VALID_FLAGS >= (1 << 16));

	if (meta->flags && (meta->flags & ~IO_INTEGRITY_VALID_FLAGS))
		return -EINVAL;

	it.count = integrity_bytes;
	ret = bio_integrity_map_user(bio, &it);
	if (!ret) {
		bio_uio_meta_to_bip(bio, meta);
		bip_set_seed(bio_integrity(bio), meta->seed);
		iov_iter_advance(&meta->iter, integrity_bytes);
		meta->seed += bio_integrity_intervals(bi, bio_sectors(bio));
	}
	return ret;
}

/**
 * bio_integrity_advance - Advance integrity vector
 * @bio:	bio whose integrity vector to update
 * @bytes_done:	number of data bytes that have been completed
 *
 * Description: This function calculates how many integrity bytes the
 * number of completed data bytes corresponds to and advances the
 * integrity vector accordingly.
 */
void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
	unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);

	bip->bip_iter.bi_sector += bio_integrity_intervals(bi, bytes_done >> 9);
	bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes);
}

/**
 * bio_integrity_trim - Trim integrity vector
 * @bio:	bio whose integrity vector to update
 *
 * Description: Used to trim the integrity vector in a cloned bio.
 */
void bio_integrity_trim(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);

	bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
}
EXPORT_SYMBOL(bio_integrity_trim);

/**
 * bio_integrity_clone - Callback for cloning bios with integrity metadata
 * @bio:	New bio
 * @bio_src:	Original bio
 * @gfp_mask:	Memory allocation mask
 *
 * Description: Called to allocate a bip when cloning a bio.
 */
int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
			gfp_t gfp_mask)
{
	struct bio_integrity_payload *bip_src = bio_integrity(bio_src);
	struct bio_integrity_payload *bip;

	BUG_ON(bip_src == NULL);

	bip = bio_integrity_alloc(bio, gfp_mask, 0);
	if (IS_ERR(bip))
		return PTR_ERR(bip);

	bip->bip_vec = bip_src->bip_vec;
	bip->bip_iter = bip_src->bip_iter;
	bip->bip_flags = bip_src->bip_flags & BIP_CLONE_FLAGS;
	bip->app_tag = bip_src->app_tag;

	return 0;
}

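/*
 * Set up the fallback page pool used by bio_integrity_alloc_buf() when a
 * regular allocation fails.
 */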
static int __init bio_integrity_initfn(void)
{
	if (mempool_init_page_pool(&integrity_buf_pool, BIO_POOL_SIZE,
			get_order(BLK_INTEGRITY_MAX_SIZE)))
		panic("bio: can't create integrity buf pool\n");
	return 0;
}
subsys_initcall(bio_integrity_initfn);