// SPDX-License-Identifier: GPL-2.0
/*
 * t10_pi.c - Functions for generating and verifying T10 Protection
 *	      Information.
 */

#include <linux/t10-pi.h>
#include <linux/blk-integrity.h>
#include <linux/crc-t10dif.h>
#include <linux/crc64.h>
#include <linux/module.h>
#include <net/checksum.h>
#include <asm/unaligned.h>
#include "blk.h"

static __be16 t10_pi_csum(__be16 csum, void *data, unsigned int len,
		unsigned char csum_type)
{
	if (csum_type == BLK_INTEGRITY_CSUM_IP)
		return (__force __be16)ip_compute_csum(data, len);
	return cpu_to_be16(crc_t10dif_update(be16_to_cpu(csum), data, len));
}

/*
 * Type 1 and Type 2 protection use the same format: 16 bit guard tag,
 * 16 bit app tag, 32 bit reference tag. Type 3 does not define the ref
 * tag.
 */
static void t10_pi_generate(struct blk_integrity_iter *iter,
		struct blk_integrity *bi)
{
	u8 offset = bi->pi_offset;
	unsigned int i;

	for (i = 0 ; i < iter->data_size ; i += iter->interval) {
		struct t10_pi_tuple *pi = iter->prot_buf + offset;

		pi->guard_tag = t10_pi_csum(0, iter->data_buf, iter->interval,
					    bi->csum_type);
		if (offset)
			pi->guard_tag = t10_pi_csum(pi->guard_tag,
					iter->prot_buf, offset, bi->csum_type);
		pi->app_tag = 0;

		if (bi->flags & BLK_INTEGRITY_REF_TAG)
			pi->ref_tag = cpu_to_be32(lower_32_bits(iter->seed));
		else
			pi->ref_tag = 0;

		iter->data_buf += iter->interval;
		iter->prot_buf += bi->tuple_size;
		iter->seed++;
	}
}

static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
		struct blk_integrity *bi)
{
	u8 offset = bi->pi_offset;
	unsigned int i;

	for (i = 0 ; i < iter->data_size ; i += iter->interval) {
		struct t10_pi_tuple *pi = iter->prot_buf + offset;
		__be16 csum;

		if (bi->flags & BLK_INTEGRITY_REF_TAG) {
			if (pi->app_tag == T10_PI_APP_ESCAPE)
				goto next;

			if (be32_to_cpu(pi->ref_tag) !=
			    lower_32_bits(iter->seed)) {
				pr_err("%s: ref tag error at location %llu " \
				       "(rcvd %u)\n", iter->disk_name,
				       (unsigned long long)
				       iter->seed, be32_to_cpu(pi->ref_tag));
				return BLK_STS_PROTECTION;
			}
		} else {
			if (pi->app_tag == T10_PI_APP_ESCAPE &&
			    pi->ref_tag == T10_PI_REF_ESCAPE)
				goto next;
		}

		csum = t10_pi_csum(0, iter->data_buf, iter->interval,
				   bi->csum_type);
		if (offset)
			csum = t10_pi_csum(csum, iter->prot_buf, offset,
					   bi->csum_type);

		if (pi->guard_tag != csum) {
			pr_err("%s: guard tag error at sector %llu " \
			       "(rcvd %04x, want %04x)\n", iter->disk_name,
			       (unsigned long long)iter->seed,
			       be16_to_cpu(pi->guard_tag), be16_to_cpu(csum));
			return BLK_STS_PROTECTION;
		}

next:
		iter->data_buf += iter->interval;
		iter->prot_buf += bi->tuple_size;
		iter->seed++;
	}

	return BLK_STS_OK;
}
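
/*
 * For reference (illustrative sketch; the authoritative definition lives in
 * <linux/t10-pi.h>): the 8-byte tuple generated and verified above has
 * roughly this layout, with all fields stored big-endian on the wire:
 *
 *	struct t10_pi_tuple {
 *		__be16 guard_tag;	// checksum over one data interval
 *		__be16 app_tag;		// application-owned, opaque here
 *		__be32 ref_tag;		// expected LBA for Type 1/Type 2
 *	};
 *
 * An all-ones app tag (T10_PI_APP_ESCAPE) tells the verifier to skip the
 * interval, which is why t10_pi_verify() checks it before the ref and guard
 * tags.
 */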

/**
 * t10_pi_type1_prepare - prepare PI prior to submitting request to device
 * @rq: request with PI that should be prepared
 *
 * For Type 1/Type 2, the virtual start sector is the one that was
 * originally submitted by the block layer for the ref_tag usage. Due to
 * partitioning, MD/DM cloning, etc. the actual physical start sector is
 * likely to be different. Remap protection information to match the
 * physical LBA.
 */
static void t10_pi_type1_prepare(struct request *rq)
{
	struct blk_integrity *bi = &rq->q->limits.integrity;
	const int tuple_sz = bi->tuple_size;
	u32 ref_tag = t10_pi_ref_tag(rq);
	u8 offset = bi->pi_offset;
	struct bio *bio;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		u32 virt = bip_get_seed(bip) & 0xffffffff;
		struct bio_vec iv;
		struct bvec_iter iter;

		/* Already remapped? */
		if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
			break;

		bip_for_each_vec(iv, bip, iter) {
			unsigned int j;
			void *p;

			p = bvec_kmap_local(&iv);
			for (j = 0; j < iv.bv_len; j += tuple_sz) {
				struct t10_pi_tuple *pi = p + offset;

				if (be32_to_cpu(pi->ref_tag) == virt)
					pi->ref_tag = cpu_to_be32(ref_tag);
				virt++;
				ref_tag++;
				p += tuple_sz;
			}
			kunmap_local(p);
		}

		bip->bip_flags |= BIP_MAPPED_INTEGRITY;
	}
}

/**
 * t10_pi_type1_complete - prepare PI prior to returning request to the block layer
 * @rq: request with PI that should be prepared
 * @nr_bytes: total bytes to prepare
 *
 * For Type 1/Type 2, the virtual start sector is the one that was
 * originally submitted by the block layer for the ref_tag usage. Due to
 * partitioning, MD/DM cloning, etc. the actual physical start sector is
 * likely to be different. Since the physical start sector was submitted
 * to the device, we should remap it back to virtual values expected by the
 * block layer.
 */
static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
{
	struct blk_integrity *bi = &rq->q->limits.integrity;
	unsigned intervals = nr_bytes >> bi->interval_exp;
	const int tuple_sz = bi->tuple_size;
	u32 ref_tag = t10_pi_ref_tag(rq);
	u8 offset = bi->pi_offset;
	struct bio *bio;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		u32 virt = bip_get_seed(bip) & 0xffffffff;
		struct bio_vec iv;
		struct bvec_iter iter;

		bip_for_each_vec(iv, bip, iter) {
			unsigned int j;
			void *p;

			p = bvec_kmap_local(&iv);
			for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {
				struct t10_pi_tuple *pi = p + offset;

				if (be32_to_cpu(pi->ref_tag) == ref_tag)
					pi->ref_tag = cpu_to_be32(virt);
				virt++;
				ref_tag++;
				intervals--;
				p += tuple_sz;
			}
			kunmap_local(p);
		}
	}
}

static __be64 ext_pi_crc64(u64 crc, void *data, unsigned int len)
{
	return cpu_to_be64(crc64_rocksoft_update(crc, data, len));
}

static void ext_pi_crc64_generate(struct blk_integrity_iter *iter,
		struct blk_integrity *bi)
{
	u8 offset = bi->pi_offset;
	unsigned int i;

	for (i = 0 ; i < iter->data_size ; i += iter->interval) {
		struct crc64_pi_tuple *pi = iter->prot_buf + offset;

		pi->guard_tag = ext_pi_crc64(0, iter->data_buf, iter->interval);
		if (offset)
			pi->guard_tag = ext_pi_crc64(be64_to_cpu(pi->guard_tag),
					iter->prot_buf, offset);
		pi->app_tag = 0;

		if (bi->flags & BLK_INTEGRITY_REF_TAG)
			put_unaligned_be48(iter->seed, pi->ref_tag);
		else
			put_unaligned_be48(0ULL, pi->ref_tag);

		iter->data_buf += iter->interval;
		iter->prot_buf += bi->tuple_size;
		iter->seed++;
	}
}

static bool ext_pi_ref_escape(u8 *ref_tag)
{
	static u8 ref_escape[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	return memcmp(ref_tag, ref_escape, sizeof(ref_escape)) == 0;
}
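
/*
 * For reference (illustrative sketch; see <linux/t10-pi.h> for the real
 * definition): the 16-byte extended tuple handled below differs from the
 * 8-byte one mainly in the guard width and the 48-bit reference tag, which
 * is why the ref tag is accessed with the unaligned 48-bit helpers:
 *
 *	struct crc64_pi_tuple {
 *		__be64 guard_tag;	// CRC64 over one data interval
 *		__be16 app_tag;		// application-owned, opaque here
 *		__u8   ref_tag[6];	// 48-bit expected LBA, big-endian
 *	};
 *
 * ext_pi_ref_escape() above matches the all-ones escape value for that
 * 6-byte ref tag.
 */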

static blk_status_t ext_pi_crc64_verify(struct blk_integrity_iter *iter,
		struct blk_integrity *bi)
{
	u8 offset = bi->pi_offset;
	unsigned int i;

	for (i = 0; i < iter->data_size; i += iter->interval) {
		struct crc64_pi_tuple *pi = iter->prot_buf + offset;
		u64 ref, seed;
		__be64 csum;

		if (bi->flags & BLK_INTEGRITY_REF_TAG) {
			if (pi->app_tag == T10_PI_APP_ESCAPE)
				goto next;

			ref = get_unaligned_be48(pi->ref_tag);
			seed = lower_48_bits(iter->seed);
			if (ref != seed) {
				pr_err("%s: ref tag error at location %llu (rcvd %llu)\n",
					iter->disk_name, seed, ref);
				return BLK_STS_PROTECTION;
			}
		} else {
			if (pi->app_tag == T10_PI_APP_ESCAPE &&
			    ext_pi_ref_escape(pi->ref_tag))
				goto next;
		}

		csum = ext_pi_crc64(0, iter->data_buf, iter->interval);
		if (offset)
			csum = ext_pi_crc64(be64_to_cpu(csum), iter->prot_buf,
					    offset);

		if (pi->guard_tag != csum) {
			pr_err("%s: guard tag error at sector %llu " \
			       "(rcvd %016llx, want %016llx)\n",
				iter->disk_name, (unsigned long long)iter->seed,
				be64_to_cpu(pi->guard_tag), be64_to_cpu(csum));
			return BLK_STS_PROTECTION;
		}

next:
		iter->data_buf += iter->interval;
		iter->prot_buf += bi->tuple_size;
		iter->seed++;
	}

	return BLK_STS_OK;
}
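
/*
 * Worked example of the Type 1 remapping done by the *_type1_prepare() and
 * *_type1_complete() helpers (numbers are illustrative only): a bio built
 * against a partition that starts at physical LBA 2048 carries ref tags
 * seeded from the virtual sectors 0, 1, 2, ... On the way down, prepare()
 * rewrites every tuple whose ref tag still equals its virtual sector to the
 * physical value (0 -> 2048, 1 -> 2049, ...); on completion, complete()
 * applies the inverse mapping so the upper layers see the ref tags they
 * originally generated. Tuples that do not match (e.g. escape values) are
 * deliberately left untouched by both passes.
 */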

static void ext_pi_type1_prepare(struct request *rq)
{
	struct blk_integrity *bi = &rq->q->limits.integrity;
	const int tuple_sz = bi->tuple_size;
	u64 ref_tag = ext_pi_ref_tag(rq);
	u8 offset = bi->pi_offset;
	struct bio *bio;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		u64 virt = lower_48_bits(bip_get_seed(bip));
		struct bio_vec iv;
		struct bvec_iter iter;

		/* Already remapped? */
		if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
			break;

		bip_for_each_vec(iv, bip, iter) {
			unsigned int j;
			void *p;

			p = bvec_kmap_local(&iv);
			for (j = 0; j < iv.bv_len; j += tuple_sz) {
				struct crc64_pi_tuple *pi = p + offset;
				u64 ref = get_unaligned_be48(pi->ref_tag);

				if (ref == virt)
					put_unaligned_be48(ref_tag, pi->ref_tag);
				virt++;
				ref_tag++;
				p += tuple_sz;
			}
			kunmap_local(p);
		}

		bip->bip_flags |= BIP_MAPPED_INTEGRITY;
	}
}

static void ext_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
{
	struct blk_integrity *bi = &rq->q->limits.integrity;
	unsigned intervals = nr_bytes >> bi->interval_exp;
	const int tuple_sz = bi->tuple_size;
	u64 ref_tag = ext_pi_ref_tag(rq);
	u8 offset = bi->pi_offset;
	struct bio *bio;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		u64 virt = lower_48_bits(bip_get_seed(bip));
		struct bio_vec iv;
		struct bvec_iter iter;

		bip_for_each_vec(iv, bip, iter) {
			unsigned int j;
			void *p;

			p = bvec_kmap_local(&iv);
			for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {
				struct crc64_pi_tuple *pi = p + offset;
				u64 ref = get_unaligned_be48(pi->ref_tag);

				if (ref == ref_tag)
					put_unaligned_be48(virt, pi->ref_tag);
				virt++;
				ref_tag++;
				intervals--;
				p += tuple_sz;
			}
			kunmap_local(p);
		}
	}
}

void blk_integrity_generate(struct blk_integrity_iter *iter,
		struct blk_integrity *bi)
{
	switch (bi->csum_type) {
	case BLK_INTEGRITY_CSUM_CRC64:
		ext_pi_crc64_generate(iter, bi);
		break;
	case BLK_INTEGRITY_CSUM_CRC:
	case BLK_INTEGRITY_CSUM_IP:
		t10_pi_generate(iter, bi);
		break;
	default:
		break;
	}
}

blk_status_t blk_integrity_verify(struct blk_integrity_iter *iter,
		struct blk_integrity *bi)
{
	switch (bi->csum_type) {
	case BLK_INTEGRITY_CSUM_CRC64:
		return ext_pi_crc64_verify(iter, bi);
	case BLK_INTEGRITY_CSUM_CRC:
	case BLK_INTEGRITY_CSUM_IP:
		return t10_pi_verify(iter, bi);
	default:
		return BLK_STS_OK;
	}
}

void blk_integrity_prepare(struct request *rq)
{
	struct blk_integrity *bi = &rq->q->limits.integrity;

	if (!(bi->flags & BLK_INTEGRITY_REF_TAG))
		return;

	if (bi->csum_type == BLK_INTEGRITY_CSUM_CRC64)
		ext_pi_type1_prepare(rq);
	else
		t10_pi_type1_prepare(rq);
}

void blk_integrity_complete(struct request *rq, unsigned int nr_bytes)
{
	struct blk_integrity *bi = &rq->q->limits.integrity;

	if (!(bi->flags & BLK_INTEGRITY_REF_TAG))
		return;

	if (bi->csum_type == BLK_INTEGRITY_CSUM_CRC64)
		ext_pi_type1_complete(rq, nr_bytes);
	else
		t10_pi_type1_complete(rq, nr_bytes);
}

MODULE_DESCRIPTION("T10 Protection Information module");
MODULE_LICENSE("GPL");
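
/*
 * Usage sketch (illustrative only, not part of this file's logic): a driver
 * exposing 512-byte intervals protected by 8-byte Type 1 tuples would
 * describe its format in the queue's integrity limits roughly as follows,
 * after which the block layer routes generation, verification and ref tag
 * remapping through the helpers above. The values below are assumptions for
 * the example; real users such as the SCSI disk and NVMe drivers set this up
 * in their own probe paths.
 *
 *	struct blk_integrity *bi = &q->limits.integrity;
 *
 *	bi->csum_type    = BLK_INTEGRITY_CSUM_CRC;	// 16-bit CRC guard tag
 *	bi->flags       |= BLK_INTEGRITY_REF_TAG;	// Type 1: check ref tags
 *	bi->tuple_size   = sizeof(struct t10_pi_tuple);
 *	bi->pi_offset    = 0;				// PI at start of metadata
 *	bi->interval_exp = 9;				// ilog2(512)
 */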