// SPDX-License-Identifier: GPL-2.0
/*
 * bio-integrity.c - bio data integrity extensions
 *
 * Copyright (C) 2007, 2008, 2009 Oracle Corporation
 * Written by: Martin K. Petersen <martin.petersen@oracle.com>
 */

#include <linux/blk-integrity.h>
#include "blk.h"

/*
 * Single-allocation container for a bio's integrity payload: the payload
 * header and its inline bio_vec array come from one kmalloc(), so
 * bio_integrity_free() can release both with a single kfree() of the bip
 * (which is the first member, hence shares the allocation's address).
 */
struct bio_integrity_alloc {
	struct bio_integrity_payload	bip;
	struct bio_vec			bvecs[];
};

/**
 * bio_integrity_free - Free bio integrity payload
 * @bio: bio containing bip to be freed
 *
 * Description: Free the integrity portion of a bio.
 */
void bio_integrity_free(struct bio *bio)
{
	/* bip is the leading member of struct bio_integrity_alloc. */
	kfree(bio_integrity(bio));
	bio->bi_integrity = NULL;
	bio->bi_opf &= ~REQ_INTEGRITY;
}

/*
 * Attach @bip to @bio and initialize it to use @bvecs (capacity @nr_vecs).
 * With nr_vecs == 0 bip_vec is left NULL; the caller is expected to point
 * it at an existing vector (see bio_integrity_clone()).
 */
void bio_integrity_init(struct bio *bio, struct bio_integrity_payload *bip,
		struct bio_vec *bvecs, unsigned int nr_vecs)
{
	memset(bip, 0, sizeof(*bip));
	bip->bip_max_vcnt = nr_vecs;
	if (nr_vecs)
		bip->bip_vec = bvecs;

	bio->bi_integrity = bip;
	bio->bi_opf |= REQ_INTEGRITY;
}

/**
 * bio_integrity_alloc - Allocate integrity payload and attach it to bio
 * @bio: bio to attach integrity metadata to
 * @gfp_mask: Memory allocation mask
 * @nr_vecs: Number of integrity metadata scatter-gather elements
 *
 * Description: This function prepares a bio for attaching integrity
 * metadata. nr_vecs specifies the maximum number of pages containing
 * integrity metadata that can be attached.
 */
struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
						  gfp_t gfp_mask,
						  unsigned int nr_vecs)
{
	struct bio_integrity_alloc *bia;

	/* Integrity metadata and inline encryption don't mix. */
	if (WARN_ON_ONCE(bio_has_crypt_ctx(bio)))
		return ERR_PTR(-EOPNOTSUPP);

	bia = kmalloc(struct_size(bia, bvecs, nr_vecs), gfp_mask);
	if (unlikely(!bia))
		return ERR_PTR(-ENOMEM);
	bio_integrity_init(bio, &bia->bip, bia->bvecs, nr_vecs);
	return &bia->bip;
}
EXPORT_SYMBOL(bio_integrity_alloc);

/*
 * Drop the pin reference on the first @nr_vecs pages held in @bv,
 * optionally marking them dirty first (for reads, where the device wrote
 * into them).  Compound pages are skipped for dirtying — presumably
 * because set_page_dirty_lock() on such pages is unsafe/unneeded here;
 * NOTE(review): confirm against pin_user_pages() dirtying rules.
 */
static void bio_integrity_unpin_bvec(struct bio_vec *bv, int nr_vecs,
				     bool dirty)
{
	int i;

	for (i = 0; i < nr_vecs; i++) {
		if (dirty && !PageCompound(bv[i].bv_page))
			set_page_dirty_lock(bv[i].bv_page);
		unpin_user_page(bv[i].bv_page);
	}
}

/*
 * Undo a read-direction bounce copy set up by bio_integrity_copy_user():
 * slot 0 of bip_vec is the kernel bounce buffer, slots 1..bip_max_vcnt-1
 * preserve the original user bvecs.  Copy the device-written bounce data
 * back to the user pages, then dirty and unpin them.
 */
static void bio_integrity_uncopy_user(struct bio_integrity_payload *bip)
{
	unsigned short orig_nr_vecs = bip->bip_max_vcnt - 1;
	struct bio_vec *orig_bvecs = &bip->bip_vec[1];
	struct bio_vec *bounce_bvec = &bip->bip_vec[0];
	size_t bytes = bounce_bvec->bv_len;
	struct iov_iter orig_iter;
	int ret;

	iov_iter_bvec(&orig_iter, ITER_DEST, orig_bvecs, orig_nr_vecs, bytes);
	ret = copy_to_iter(bvec_virt(bounce_bvec), bytes, &orig_iter);
	WARN_ON_ONCE(ret != bytes);

	bio_integrity_unpin_bvec(orig_bvecs, orig_nr_vecs, true);
}

/**
 * bio_integrity_unmap_user - Unmap user integrity payload
 * @bio: bio containing bip to be unmapped
 *
 * Unmap the user mapped integrity portion of a bio.
 */
void bio_integrity_unmap_user(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip->bip_flags & BIP_COPY_USER) {
		/*
		 * Bounce-buffered path: reads must be copied back to user
		 * pages (which also unpins them); writes already dropped
		 * their pins in bio_integrity_copy_user().  Either way the
		 * kernel bounce buffer in slot 0 is freed here.
		 */
		if (bio_data_dir(bio) == READ)
			bio_integrity_uncopy_user(bip);
		kfree(bvec_virt(bip->bip_vec));
		return;
	}

	/* Directly-mapped path: just dirty (on read) and unpin. */
	bio_integrity_unpin_bvec(bip->bip_vec, bip->bip_max_vcnt,
			bio_data_dir(bio) == READ);
}

/**
 * bio_integrity_add_page - Attach integrity metadata
 * @bio: bio to update
 * @page: page containing integrity metadata
 * @len: number of bytes of integrity metadata in page
 * @offset: start offset within page
 *
 * Description: Attach a page containing integrity metadata to bio.
 *
 * Returns @len on success (including a merge into the previous vector),
 * 0 if the segment cannot be added under the queue's limits.
 */
int bio_integrity_add_page(struct bio *bio, struct page *page,
			   unsigned int len, unsigned int offset)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (bip->bip_vcnt > 0) {
		struct bio_vec *bv = &bip->bip_vec[bip->bip_vcnt - 1];
		bool same_page = false;

		/* Try to extend the previous vector before using a new one. */
		if (bvec_try_merge_hw_page(q, bv, page, len, offset,
					   &same_page)) {
			bip->bip_iter.bi_size += len;
			return len;
		}

		/* Bounded by both payload capacity and the queue's limit. */
		if (bip->bip_vcnt >=
		    min(bip->bip_max_vcnt, queue_max_integrity_segments(q)))
			return 0;

		/*
		 * If the queue doesn't support SG gaps and adding this segment
		 * would create a gap, disallow it.
		 */
		if (bvec_gap_to_prev(&q->limits, bv, offset))
			return 0;
	}

	bvec_set_page(&bip->bip_vec[bip->bip_vcnt], page, len, offset);
	bip->bip_vcnt++;
	bip->bip_iter.bi_size += len;

	return len;
}
EXPORT_SYMBOL(bio_integrity_add_page);

/*
 * Bounce-buffer the user integrity payload through a kernel allocation:
 * used when the user buffer is misaligned or has too many segments for
 * the queue.  For writes the user data is copied in immediately and the
 * user pages unpinned; for reads the original bvecs are stashed after the
 * bounce vector so bio_integrity_uncopy_user() can copy back and unpin at
 * completion.  Returns 0 or a negative errno.
 */
static int bio_integrity_copy_user(struct bio *bio, struct bio_vec *bvec,
				   int nr_vecs, unsigned int len,
				   unsigned int direction)
{
	bool write = direction == ITER_SOURCE;
	struct bio_integrity_payload *bip;
	struct iov_iter iter;
	void *buf;
	int ret;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (write) {
		iov_iter_bvec(&iter, direction, bvec, nr_vecs, len);
		if (!copy_from_iter_full(buf, len, &iter)) {
			ret = -EFAULT;
			goto free_buf;
		}

		/* Write path only needs the single bounce vector. */
		bip = bio_integrity_alloc(bio, GFP_KERNEL, 1);
	} else {
		memset(buf, 0, len);

		/*
		 * We need to preserve the original bvec and the number of vecs
		 * in it for completion handling
		 */
		bip = bio_integrity_alloc(bio, GFP_KERNEL, nr_vecs + 1);
	}

	if (IS_ERR(bip)) {
		ret = PTR_ERR(bip);
		goto free_buf;
	}

	if (write)
		bio_integrity_unpin_bvec(bvec, nr_vecs, false);
	else
		memcpy(&bip->bip_vec[1], bvec, nr_vecs * sizeof(*bvec));

	ret = bio_integrity_add_page(bio, virt_to_page(buf), len,
				     offset_in_page(buf));
	if (ret != len) {
		ret = -ENOMEM;
		goto free_bip;
	}

	bip->bip_flags |= BIP_COPY_USER;
	bip->bip_vcnt = nr_vecs;
	return 0;
free_bip:
	bio_integrity_free(bio);
free_buf:
	kfree(buf);
	return ret;
}

/*
 * Fast path: attach the pinned user pages directly as the integrity
 * payload (no bounce copy).  Returns 0 or a negative errno.
 */
static int bio_integrity_init_user(struct bio *bio, struct bio_vec *bvec,
				   int nr_vecs, unsigned int len)
{
	struct bio_integrity_payload *bip;

	bip = bio_integrity_alloc(bio, GFP_KERNEL, nr_vecs);
	if (IS_ERR(bip))
		return PTR_ERR(bip);

	memcpy(bip->bip_vec, bvec, nr_vecs * sizeof(*bvec));
	bip->bip_iter.bi_size = len;
	bip->bip_vcnt = nr_vecs;
	return 0;
}

/*
 * Convert an array of pinned pages into bio_vecs, coalescing runs of
 * physically-contiguous pages that belong to the same folio.  Pages merged
 * into a preceding vector are unpinned immediately — completion later
 * unpins only each bvec's bv_page — so exactly one pin per bvec survives.
 * Returns the number of bio_vecs produced.
 */
static unsigned int bvec_from_pages(struct bio_vec *bvec, struct page **pages,
				    int nr_vecs, ssize_t bytes, ssize_t offset)
{
	unsigned int nr_bvecs = 0;
	int i, j;

	for (i = 0; i < nr_vecs; i = j) {
		size_t size = min_t(size_t, bytes, PAGE_SIZE - offset);
		struct folio *folio = page_folio(pages[i]);

		bytes -= size;
		for (j = i + 1; j < nr_vecs; j++) {
			size_t next = min_t(size_t, PAGE_SIZE, bytes);

			if (page_folio(pages[j]) != folio ||
			    pages[j] != pages[j - 1] + 1)
				break;
			unpin_user_page(pages[j]);
			size += next;
			bytes -= next;
		}

		bvec_set_page(&bvec[nr_bvecs], pages[i], size, offset);
		/* Only the first page of the payload can start mid-page. */
		offset = 0;
		nr_bvecs++;
	}

	return nr_bvecs;
}

/*
 * Map the user memory described by @iter as this bio's integrity payload,
 * pinning the pages.  Falls back to a bounce copy when the buffer is not
 * DMA-aligned or exceeds the queue's integrity segment limit.  Returns 0
 * or a negative errno.
 */
int bio_integrity_map_user(struct bio *bio, struct iov_iter *iter)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	unsigned int align = blk_lim_dma_alignment_and_pad(&q->limits);
	struct page *stack_pages[UIO_FASTIOV], **pages = stack_pages;
	struct bio_vec stack_vec[UIO_FASTIOV], *bvec = stack_vec;
	size_t offset, bytes = iter->count;
	unsigned int direction, nr_bvecs;
	int ret, nr_vecs;
	bool copy;

	if (bio_integrity(bio))
		return -EINVAL;
	if (bytes >> SECTOR_SHIFT > queue_max_hw_sectors(q))
		return -E2BIG;

	if (bio_data_dir(bio) == READ)
		direction = ITER_DEST;
	else
		direction = ITER_SOURCE;

	/* Probe one past the limit so an oversized iter is detectable. */
	nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS + 1);
	if (nr_vecs > BIO_MAX_VECS)
		return -E2BIG;
	if (nr_vecs > UIO_FASTIOV) {
		bvec = kcalloc(nr_vecs, sizeof(*bvec), GFP_KERNEL);
		if (!bvec)
			return -ENOMEM;
		/* NULL asks iov_iter_extract_pages() to allocate the array. */
		pages = NULL;
	}

	copy = !iov_iter_is_aligned(iter, align, align);
	ret = iov_iter_extract_pages(iter, &pages, bytes, nr_vecs, 0, &offset);
	if (unlikely(ret < 0))
		goto free_bvec;

	nr_bvecs = bvec_from_pages(bvec, pages, nr_vecs, bytes, offset);
	if (pages != stack_pages)
		kvfree(pages);
	if (nr_bvecs > queue_max_integrity_segments(q))
		copy = true;

	if (copy)
		ret = bio_integrity_copy_user(bio, bvec, nr_bvecs, bytes,
					      direction);
	else
		ret = bio_integrity_init_user(bio, bvec, nr_bvecs, bytes);
	if (ret)
		goto release_pages;
	if (bvec != stack_vec)
		kfree(bvec);

	return 0;

release_pages:
	bio_integrity_unpin_bvec(bvec, nr_bvecs, false);
free_bvec:
	if (bvec != stack_vec)
		kfree(bvec);
	return ret;
}

/* Transfer user-requested integrity check flags and app tag to the bip. */
static void bio_uio_meta_to_bip(struct bio *bio, struct uio_meta *meta)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);

	if (meta->flags & IO_INTEGRITY_CHK_GUARD)
		bip->bip_flags |= BIP_CHECK_GUARD;
	if (meta->flags & IO_INTEGRITY_CHK_APPTAG)
		bip->bip_flags |= BIP_CHECK_APPTAG;
	if (meta->flags & IO_INTEGRITY_CHK_REFTAG)
		bip->bip_flags |= BIP_CHECK_REFTAG;

	bip->app_tag = meta->app_tag;
}

/*
 * Map the slice of @meta's user iterator that corresponds to this bio's
 * data, then advance the shared iterator/seed so a subsequent (split) bio
 * picks up where this one left off.  Returns 0 or a negative errno.
 */
int bio_integrity_map_iter(struct bio *bio, struct uio_meta *meta)
{
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
	unsigned int integrity_bytes;
	int ret;
	struct iov_iter it;

	if (!bi)
		return -EINVAL;
	/*
	 * original meta iterator can be bigger.
	 * process integrity info corresponding to current data buffer only.
	 */
	it = meta->iter;
	integrity_bytes = bio_integrity_bytes(bi, bio_sectors(bio));
	if (it.count < integrity_bytes)
		return -EINVAL;

	/* should fit into two bytes */
	BUILD_BUG_ON(IO_INTEGRITY_VALID_FLAGS >= (1 << 16));

	if (meta->flags && (meta->flags & ~IO_INTEGRITY_VALID_FLAGS))
		return -EINVAL;

	it.count = integrity_bytes;
	ret = bio_integrity_map_user(bio, &it);
	if (!ret) {
		bio_uio_meta_to_bip(bio, meta);
		bip_set_seed(bio_integrity(bio), meta->seed);
		iov_iter_advance(&meta->iter, integrity_bytes);
		meta->seed += bio_integrity_intervals(bi, bio_sectors(bio));
	}
	return ret;
}

/**
 * bio_integrity_advance - Advance integrity vector
 * @bio: bio whose integrity vector to update
 * @bytes_done: number of data bytes that have been completed
 *
 * Description: This function calculates how many integrity bytes the
 * number of completed data bytes correspond to and advances the
 * integrity vector accordingly.
 */
void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
	/* >> 9: convert completed data bytes to 512-byte sectors. */
	unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);

	bip->bip_iter.bi_sector += bio_integrity_intervals(bi, bytes_done >> 9);
	bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes);
}

/**
 * bio_integrity_trim - Trim integrity vector
 * @bio: bio whose integrity vector to update
 *
 * Description: Used to trim the integrity vector in a cloned bio.
 */
void bio_integrity_trim(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);

	bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
}
EXPORT_SYMBOL(bio_integrity_trim);

/**
 * bio_integrity_clone - Callback for cloning bios with integrity metadata
 * @bio: New bio
 * @bio_src: Original bio
 * @gfp_mask: Memory allocation mask
 *
 * Description: Called to allocate a bip when cloning a bio
 */
int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
			gfp_t gfp_mask)
{
	struct bio_integrity_payload *bip_src = bio_integrity(bio_src);
	struct bio_integrity_payload *bip;

	BUG_ON(bip_src == NULL);

	/*
	 * nr_vecs == 0: the clone shares the source's bio_vec array rather
	 * than owning its own (see bio_integrity_init()).
	 */
	bip = bio_integrity_alloc(bio, gfp_mask, 0);
	if (IS_ERR(bip))
		return PTR_ERR(bip);

	bip->bip_vec = bip_src->bip_vec;
	bip->bip_iter = bip_src->bip_iter;
	bip->bip_flags = bip_src->bip_flags & BIP_CLONE_FLAGS;
	bip->app_tag = bip_src->app_tag;

	return 0;
}