// SPDX-License-Identifier: GPL-2.0
/*
 * t10_pi.c - Functions for generating and verifying T10 Protection
 *	      Information.
 */

#include <linux/t10-pi.h>
#include <linux/blk-integrity.h>
#include <linux/crc-t10dif.h>
#include <linux/crc64.h>
#include <linux/module.h>
#include <net/checksum.h>
#include <asm/unaligned.h>
#include "blk.h"

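/*
 * Iteration state for the generate/verify helpers below: current
 * positions in the data and protection buffers, the reference tag seed,
 * and the protection interval size.
 */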
struct blk_integrity_iter {
	void			*prot_buf;
	void			*data_buf;
	sector_t		seed;
	unsigned int		data_size;
	unsigned short		interval;
	const char		*disk_name;
};

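/*
 * Compute the guard tag for @len bytes of @data: an IP checksum, or a
 * T10-DIF CRC continued from the partial @csum, depending on the
 * profile's checksum type.
 */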
static __be16 t10_pi_csum(__be16 csum, void *data, unsigned int len,
		unsigned char csum_type)
{
	if (csum_type == BLK_INTEGRITY_CSUM_IP)
		return (__force __be16)ip_compute_csum(data, len);
	return cpu_to_be16(crc_t10dif_update(be16_to_cpu(csum), data, len));
}

/*
 * Type 1 and Type 2 protection use the same format: 16 bit guard tag,
 * 16 bit app tag, 32 bit reference tag. Type 3 does not define the ref
 * tag.
 */
static void t10_pi_generate(struct blk_integrity_iter *iter,
		struct blk_integrity *bi)
{
	u8 offset = bi->pi_offset;
	unsigned int i;

	for (i = 0; i < iter->data_size; i += iter->interval) {
		struct t10_pi_tuple *pi = iter->prot_buf + offset;

		pi->guard_tag = t10_pi_csum(0, iter->data_buf, iter->interval,
				bi->csum_type);
		if (offset)
			pi->guard_tag = t10_pi_csum(pi->guard_tag,
					iter->prot_buf, offset, bi->csum_type);
		pi->app_tag = 0;

		if (bi->flags & BLK_INTEGRITY_REF_TAG)
			pi->ref_tag = cpu_to_be32(lower_32_bits(iter->seed));
		else
			pi->ref_tag = 0;

		iter->data_buf += iter->interval;
		iter->prot_buf += bi->tuple_size;
		iter->seed++;
	}
}

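/*
 * Verify the guard and reference tags for each protection interval.
 * Intervals carrying the T10-defined escape values have checking
 * disabled and are skipped.
 */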
static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
		struct blk_integrity *bi)
{
	u8 offset = bi->pi_offset;
	unsigned int i;

	for (i = 0; i < iter->data_size; i += iter->interval) {
		struct t10_pi_tuple *pi = iter->prot_buf + offset;
		__be16 csum;

		if (bi->flags & BLK_INTEGRITY_REF_TAG) {
			if (pi->app_tag == T10_PI_APP_ESCAPE)
				goto next;

			if (be32_to_cpu(pi->ref_tag) !=
			    lower_32_bits(iter->seed)) {
				pr_err("%s: ref tag error at location %llu (rcvd %u)\n",
				       iter->disk_name,
				       (unsigned long long)iter->seed,
				       be32_to_cpu(pi->ref_tag));
				return BLK_STS_PROTECTION;
			}
		} else {
			if (pi->app_tag == T10_PI_APP_ESCAPE &&
			    pi->ref_tag == T10_PI_REF_ESCAPE)
				goto next;
		}

		csum = t10_pi_csum(0, iter->data_buf, iter->interval,
				bi->csum_type);
		if (offset)
			csum = t10_pi_csum(csum, iter->prot_buf, offset,
					bi->csum_type);

		if (pi->guard_tag != csum) {
			pr_err("%s: guard tag error at sector %llu (rcvd %04x, want %04x)\n",
			       iter->disk_name, (unsigned long long)iter->seed,
			       be16_to_cpu(pi->guard_tag), be16_to_cpu(csum));
			return BLK_STS_PROTECTION;
		}

next:
		iter->data_buf += iter->interval;
		iter->prot_buf += bi->tuple_size;
		iter->seed++;
	}

	return BLK_STS_OK;
}

/**
 * t10_pi_type1_prepare - prepare PI prior to submitting request to device
 * @rq:              request with PI that should be prepared
 *
 * For Type 1/Type 2, the virtual start sector is the one that was
 * originally submitted by the block layer for ref_tag usage. Due to
 * partitioning, MD/DM cloning, etc. the actual physical start sector is
 * likely to be different. Remap the protection information to match the
 * physical LBA.
 */
static void t10_pi_type1_prepare(struct request *rq)
{
	struct blk_integrity *bi = &rq->q->limits.integrity;
	const int tuple_sz = bi->tuple_size;
	u32 ref_tag = t10_pi_ref_tag(rq);
	u8 offset = bi->pi_offset;
	struct bio *bio;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		u32 virt = bip_get_seed(bip) & 0xffffffff;
		struct bio_vec iv;
		struct bvec_iter iter;

		/* Already remapped? */
		if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
			break;

		bip_for_each_vec(iv, bip, iter) {
			unsigned int j;
			void *p;

			p = bvec_kmap_local(&iv);
			for (j = 0; j < iv.bv_len; j += tuple_sz) {
				struct t10_pi_tuple *pi = p + offset;

				if (be32_to_cpu(pi->ref_tag) == virt)
					pi->ref_tag = cpu_to_be32(ref_tag);
				virt++;
				ref_tag++;
				p += tuple_sz;
			}
			kunmap_local(p);
		}

		bip->bip_flags |= BIP_MAPPED_INTEGRITY;
	}
}

/**
 * t10_pi_type1_complete - prepare PI prior to returning request to block layer
 * @rq:              request with PI that should be prepared
 * @nr_bytes:        total bytes to prepare
 *
 * For Type 1/Type 2, the virtual start sector is the one that was
 * originally submitted by the block layer for ref_tag usage. Due to
 * partitioning, MD/DM cloning, etc. the actual physical start sector is
 * likely to be different. Since the physical start sector was submitted
 * to the device, remap the protection information back to the virtual
 * values expected by the block layer.
 */
static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
{
	struct blk_integrity *bi = &rq->q->limits.integrity;
	unsigned int intervals = nr_bytes >> bi->interval_exp;
	const int tuple_sz = bi->tuple_size;
	u32 ref_tag = t10_pi_ref_tag(rq);
	u8 offset = bi->pi_offset;
	struct bio *bio;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		u32 virt = bip_get_seed(bip) & 0xffffffff;
		struct bio_vec iv;
		struct bvec_iter iter;

		bip_for_each_vec(iv, bip, iter) {
			unsigned int j;
			void *p;

			p = bvec_kmap_local(&iv);
			for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {
				struct t10_pi_tuple *pi = p + offset;

				if (be32_to_cpu(pi->ref_tag) == ref_tag)
					pi->ref_tag = cpu_to_be32(virt);
				virt++;
				ref_tag++;
				intervals--;
				p += tuple_sz;
			}
			kunmap_local(p);
		}
	}
}

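/* Update the 64-bit Rocksoft CRC guard tag over @len bytes of @data. */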
static __be64 ext_pi_crc64(u64 crc, void *data, unsigned int len)
{
	return cpu_to_be64(crc64_rocksoft_update(crc, data, len));
}

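/*
 * Generate extended PI: a CRC64 guard tag and a 48-bit reference tag
 * per protection interval.
 */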
static void ext_pi_crc64_generate(struct blk_integrity_iter *iter,
		struct blk_integrity *bi)
{
	u8 offset = bi->pi_offset;
	unsigned int i;

	for (i = 0; i < iter->data_size; i += iter->interval) {
		struct crc64_pi_tuple *pi = iter->prot_buf + offset;

		pi->guard_tag = ext_pi_crc64(0, iter->data_buf, iter->interval);
		if (offset)
			pi->guard_tag = ext_pi_crc64(be64_to_cpu(pi->guard_tag),
					iter->prot_buf, offset);
		pi->app_tag = 0;

		if (bi->flags & BLK_INTEGRITY_REF_TAG)
			put_unaligned_be48(iter->seed, pi->ref_tag);
		else
			put_unaligned_be48(0ULL, pi->ref_tag);

		iter->data_buf += iter->interval;
		iter->prot_buf += bi->tuple_size;
		iter->seed++;
	}
}

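/* Check whether a 48-bit reference tag holds the all-ones escape value. */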
static bool ext_pi_ref_escape(u8 *ref_tag)
{
	static const u8 ref_escape[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	return memcmp(ref_tag, ref_escape, sizeof(ref_escape)) == 0;
}

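/*
 * Verify extended PI: check the 48-bit reference tag against the
 * expected seed, then recompute and compare the CRC64 guard tag.
 * Escape values disable checking, as in t10_pi_verify().
 */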
static blk_status_t ext_pi_crc64_verify(struct blk_integrity_iter *iter,
		struct blk_integrity *bi)
{
	u8 offset = bi->pi_offset;
	unsigned int i;

	for (i = 0; i < iter->data_size; i += iter->interval) {
		struct crc64_pi_tuple *pi = iter->prot_buf + offset;
		u64 ref, seed;
		__be64 csum;

		if (bi->flags & BLK_INTEGRITY_REF_TAG) {
			if (pi->app_tag == T10_PI_APP_ESCAPE)
				goto next;

			ref = get_unaligned_be48(pi->ref_tag);
			seed = lower_48_bits(iter->seed);
			if (ref != seed) {
				pr_err("%s: ref tag error at location %llu (rcvd %llu)\n",
					iter->disk_name, seed, ref);
				return BLK_STS_PROTECTION;
			}
		} else {
			if (pi->app_tag == T10_PI_APP_ESCAPE &&
			    ext_pi_ref_escape(pi->ref_tag))
				goto next;
		}

		csum = ext_pi_crc64(0, iter->data_buf, iter->interval);
		if (offset)
			csum = ext_pi_crc64(be64_to_cpu(csum), iter->prot_buf,
					    offset);

		if (pi->guard_tag != csum) {
			pr_err("%s: guard tag error at sector %llu (rcvd %016llx, want %016llx)\n",
				iter->disk_name, (unsigned long long)iter->seed,
				be64_to_cpu(pi->guard_tag), be64_to_cpu(csum));
			return BLK_STS_PROTECTION;
		}

next:
		iter->data_buf += iter->interval;
		iter->prot_buf += bi->tuple_size;
		iter->seed++;
	}

	return BLK_STS_OK;
}

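/**
 * ext_pi_type1_prepare - prepare extended PI prior to submitting request
 * @rq:              request with PI that should be prepared
 *
 * Performs the same virtual-to-physical ref tag remapping as
 * t10_pi_type1_prepare(), but for the extended profile with its 48-bit
 * reference tags.
 */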
static void ext_pi_type1_prepare(struct request *rq)
{
	struct blk_integrity *bi = &rq->q->limits.integrity;
	const int tuple_sz = bi->tuple_size;
	u64 ref_tag = ext_pi_ref_tag(rq);
	u8 offset = bi->pi_offset;
	struct bio *bio;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		u64 virt = lower_48_bits(bip_get_seed(bip));
		struct bio_vec iv;
		struct bvec_iter iter;

		/* Already remapped? */
		if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
			break;

		bip_for_each_vec(iv, bip, iter) {
			unsigned int j;
			void *p;

			p = bvec_kmap_local(&iv);
			for (j = 0; j < iv.bv_len; j += tuple_sz) {
				struct crc64_pi_tuple *pi = p + offset;
				u64 ref = get_unaligned_be48(pi->ref_tag);

				if (ref == virt)
					put_unaligned_be48(ref_tag, pi->ref_tag);
				virt++;
				ref_tag++;
				p += tuple_sz;
			}
			kunmap_local(p);
		}

		bip->bip_flags |= BIP_MAPPED_INTEGRITY;
	}
}

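/**
 * ext_pi_type1_complete - prepare extended PI prior to returning to blk layer
 * @rq:              request with PI that should be prepared
 * @nr_bytes:        total bytes to prepare
 *
 * Performs the same physical-to-virtual ref tag remapping as
 * t10_pi_type1_complete(), but for the extended profile with its 48-bit
 * reference tags.
 */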
static void ext_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
{
	struct blk_integrity *bi = &rq->q->limits.integrity;
	unsigned int intervals = nr_bytes >> bi->interval_exp;
	const int tuple_sz = bi->tuple_size;
	u64 ref_tag = ext_pi_ref_tag(rq);
	u8 offset = bi->pi_offset;
	struct bio *bio;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		u64 virt = lower_48_bits(bip_get_seed(bip));
		struct bio_vec iv;
		struct bvec_iter iter;

		bip_for_each_vec(iv, bip, iter) {
			unsigned int j;
			void *p;

			p = bvec_kmap_local(&iv);
			for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {
				struct crc64_pi_tuple *pi = p + offset;
				u64 ref = get_unaligned_be48(pi->ref_tag);

				if (ref == ref_tag)
					put_unaligned_be48(virt, pi->ref_tag);
				virt++;
				ref_tag++;
				intervals--;
				p += tuple_sz;
			}
			kunmap_local(p);
		}
	}
}

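/*
 * Generate protection information for each data segment of @bio,
 * dispatching on the profile's checksum type.
 */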
void blk_integrity_generate(struct bio *bio)
{
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct blk_integrity_iter iter;
	struct bvec_iter bviter;
	struct bio_vec bv;

	iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
	iter.interval = 1 << bi->interval_exp;
	iter.seed = bio->bi_iter.bi_sector;
	iter.prot_buf = bvec_virt(bip->bip_vec);
	bio_for_each_segment(bv, bio, bviter) {
		void *kaddr = bvec_kmap_local(&bv);

		iter.data_buf = kaddr;
		iter.data_size = bv.bv_len;
		switch (bi->csum_type) {
		case BLK_INTEGRITY_CSUM_CRC64:
			ext_pi_crc64_generate(&iter, bi);
			break;
		case BLK_INTEGRITY_CSUM_CRC:
		case BLK_INTEGRITY_CSUM_IP:
			t10_pi_generate(&iter, bi);
			break;
		default:
			break;
		}
		kunmap_local(kaddr);
	}
}

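/*
 * Verify the protection information attached to @bio, setting bi_status
 * to BLK_STS_PROTECTION on the first mismatch.
 */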
void blk_integrity_verify(struct bio *bio)
{
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct blk_integrity_iter iter;
	struct bvec_iter bviter;
	struct bio_vec bv;

	/*
	 * At the moment verify is called, bi_iter has been advanced during
	 * split and completion, so use the copy created during submission
	 * here.
	 */
	iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
	iter.interval = 1 << bi->interval_exp;
	iter.seed = bip->bio_iter.bi_sector;
	iter.prot_buf = bvec_virt(bip->bip_vec);
	__bio_for_each_segment(bv, bio, bviter, bip->bio_iter) {
		void *kaddr = bvec_kmap_local(&bv);
		blk_status_t ret = BLK_STS_OK;

		iter.data_buf = kaddr;
		iter.data_size = bv.bv_len;
		switch (bi->csum_type) {
		case BLK_INTEGRITY_CSUM_CRC64:
			ret = ext_pi_crc64_verify(&iter, bi);
			break;
		case BLK_INTEGRITY_CSUM_CRC:
		case BLK_INTEGRITY_CSUM_IP:
			ret = t10_pi_verify(&iter, bi);
			break;
		default:
			break;
		}
		kunmap_local(kaddr);

		if (ret) {
			bio->bi_status = ret;
			return;
		}
	}
}

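/*
 * Remap reference tags from virtual to physical sector numbers before a
 * request carrying Type 1 protection information is sent to the device.
 */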
void blk_integrity_prepare(struct request *rq)
{
	struct blk_integrity *bi = &rq->q->limits.integrity;

	if (!(bi->flags & BLK_INTEGRITY_REF_TAG))
		return;

	if (bi->csum_type == BLK_INTEGRITY_CSUM_CRC64)
		ext_pi_type1_prepare(rq);
	else
		t10_pi_type1_prepare(rq);
}

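/*
 * Remap reference tags back from physical to virtual sector numbers
 * when a request carrying Type 1 protection information completes.
 */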
void blk_integrity_complete(struct request *rq, unsigned int nr_bytes)
{
	struct blk_integrity *bi = &rq->q->limits.integrity;

	if (!(bi->flags & BLK_INTEGRITY_REF_TAG))
		return;

	if (bi->csum_type == BLK_INTEGRITY_CSUM_CRC64)
		ext_pi_type1_complete(rq, nr_bytes);
	else
		t10_pi_type1_complete(rq, nr_bytes);
}

MODULE_DESCRIPTION("T10 Protection Information module");
MODULE_LICENSE("GPL");