xref: /linux/block/t10-pi.c (revision 7ec462100ef9142344ddbf86f2c3008b97acddbe)
18c16567dSChristoph Hellwig // SPDX-License-Identifier: GPL-2.0
22341c2f8SMartin K. Petersen /*
32341c2f8SMartin K. Petersen  * t10_pi.c - Functions for generating and verifying T10 Protection
42341c2f8SMartin K. Petersen  *	      Information.
52341c2f8SMartin K. Petersen  */
62341c2f8SMartin K. Petersen 
72341c2f8SMartin K. Petersen #include <linux/t10-pi.h>
8fe45e630SChristoph Hellwig #include <linux/blk-integrity.h>
92341c2f8SMartin K. Petersen #include <linux/crc-t10dif.h>
10a7d4383fSKeith Busch #include <linux/crc64.h>
112341c2f8SMartin K. Petersen #include <net/checksum.h>
12*5f60d5f6SAl Viro #include <linux/unaligned.h>
13e9f5f44aSChristoph Hellwig #include "blk.h"
142341c2f8SMartin K. Petersen 
/*
 * State for walking one bio segment's data against its protection buffer.
 * Advanced in lock-step by the generate/verify helpers below.
 */
struct blk_integrity_iter {
	void			*prot_buf;	/* protection information buffer */
	void			*data_buf;	/* data being protected/verified */
	sector_t		seed;		/* ref tag value for next interval */
	unsigned int		data_size;	/* bytes of data left to process */
	unsigned short		interval;	/* protection interval size (bytes) */
	const char		*disk_name;	/* for error reporting only */
};
23d19b4634SChristoph Hellwig 
/*
 * Compute the guard tag checksum over @len bytes of @data.
 *
 * The CRC variant folds the previous @csum in so it can be chained over
 * multiple buffers; the IP checksum variant ignores @csum.
 */
static __be16 t10_pi_csum(__be16 csum, void *data, unsigned int len,
		unsigned char csum_type)
{
	if (csum_type != BLK_INTEGRITY_CSUM_IP)
		return cpu_to_be16(crc_t10dif_update(be16_to_cpu(csum),
						     data, len));
	return (__force __be16)ip_compute_csum(data, len);
}
312341c2f8SMartin K. Petersen 
322341c2f8SMartin K. Petersen /*
332341c2f8SMartin K. Petersen  * Type 1 and Type 2 protection use the same format: 16 bit guard tag,
342341c2f8SMartin K. Petersen  * 16 bit app tag, 32 bit reference tag. Type 3 does not define the ref
352341c2f8SMartin K. Petersen  * tag.
362341c2f8SMartin K. Petersen  */
/*
 * Generate one T10 PI tuple (guard/app/ref tag) per protection interval
 * of the data buffer, writing them into the protection buffer.
 */
static void t10_pi_generate(struct blk_integrity_iter *iter,
		struct blk_integrity *bi)
{
	u8 offset = bi->pi_offset;
	unsigned int i;

	for (i = 0 ; i < iter->data_size ; i += iter->interval) {
		struct t10_pi_tuple *pi = iter->prot_buf + offset;

		pi->guard_tag = t10_pi_csum(0, iter->data_buf, iter->interval,
				bi->csum_type);
		/*
		 * When the PI tuple does not sit at the start of the metadata,
		 * the preceding metadata bytes are covered by the guard tag
		 * as well.
		 */
		if (offset)
			pi->guard_tag = t10_pi_csum(pi->guard_tag,
					iter->prot_buf, offset, bi->csum_type);
		pi->app_tag = 0;

		/* Type 3 (no REF_TAG flag) does not define the ref tag. */
		if (bi->flags & BLK_INTEGRITY_REF_TAG)
			pi->ref_tag = cpu_to_be32(lower_32_bits(iter->seed));
		else
			pi->ref_tag = 0;

		iter->data_buf += iter->interval;
		iter->prot_buf += bi->tuple_size;
		iter->seed++;
	}
}
632341c2f8SMartin K. Petersen 
/*
 * Verify the ref and guard tags of every protection interval in the data
 * buffer.  Returns BLK_STS_PROTECTION on the first mismatch, BLK_STS_OK
 * if everything checks out.
 */
static blk_status_t t10_pi_verify(struct blk_integrity_iter *iter,
		struct blk_integrity *bi)
{
	u8 offset = bi->pi_offset;
	unsigned int i;

	for (i = 0 ; i < iter->data_size ; i += iter->interval) {
		struct t10_pi_tuple *pi = iter->prot_buf + offset;
		__be16 csum;

		if (bi->flags & BLK_INTEGRITY_REF_TAG) {
			/*
			 * Type 1/2: an app tag escape (all ones) disables
			 * checking for this interval.
			 */
			if (pi->app_tag == T10_PI_APP_ESCAPE)
				goto next;

			if (be32_to_cpu(pi->ref_tag) !=
			    lower_32_bits(iter->seed)) {
				pr_err("%s: ref tag error at location %llu " \
				       "(rcvd %u)\n", iter->disk_name,
				       (unsigned long long)
				       iter->seed, be32_to_cpu(pi->ref_tag));
				return BLK_STS_PROTECTION;
			}
		} else {
			/*
			 * Type 3: checking is disabled only when both the
			 * app tag and the ref tag carry the escape value.
			 */
			if (pi->app_tag == T10_PI_APP_ESCAPE &&
			    pi->ref_tag == T10_PI_REF_ESCAPE)
				goto next;
		}

		csum = t10_pi_csum(0, iter->data_buf, iter->interval,
				bi->csum_type);
		/* Metadata before the tuple is covered by the guard tag too. */
		if (offset)
			csum = t10_pi_csum(csum, iter->prot_buf, offset,
					bi->csum_type);

		if (pi->guard_tag != csum) {
			pr_err("%s: guard tag error at sector %llu " \
			       "(rcvd %04x, want %04x)\n", iter->disk_name,
			       (unsigned long long)iter->seed,
			       be16_to_cpu(pi->guard_tag), be16_to_cpu(csum));
			return BLK_STS_PROTECTION;
		}

next:
		iter->data_buf += iter->interval;
		iter->prot_buf += bi->tuple_size;
		iter->seed++;
	}

	return BLK_STS_OK;
}
1142341c2f8SMartin K. Petersen 
/**
 * t10_pi_type1_prepare - prepare PI prior submitting request to device
 * @rq:              request with PI that should be prepared
 *
 * For Type 1/Type 2, the virtual start sector is the one that was
 * originally submitted by the block layer for the ref_tag usage. Due to
 * partitioning, MD/DM cloning, etc. the actual physical start sector is
 * likely to be different. Remap protection information to match the
 * physical LBA.
 */
static void t10_pi_type1_prepare(struct request *rq)
{
	struct blk_integrity *bi = &rq->q->limits.integrity;
	const int tuple_sz = bi->tuple_size;
	u32 ref_tag = t10_pi_ref_tag(rq);
	u8 offset = bi->pi_offset;
	struct bio *bio;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		/* Virtual start sector as seen by the submitter (32-bit ref tag). */
		u32 virt = bip_get_seed(bip) & 0xffffffff;
		struct bio_vec iv;
		struct bvec_iter iter;

		/* Already remapped? */
		if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
			break;

		bip_for_each_vec(iv, bip, iter) {
			unsigned int j;
			void *p;

			p = bvec_kmap_local(&iv);
			for (j = 0; j < iv.bv_len; j += tuple_sz) {
				struct t10_pi_tuple *pi = p + offset;

				/*
				 * Only remap tuples that still carry the
				 * expected virtual ref tag; anything else
				 * (e.g. escape values) is left untouched.
				 */
				if (be32_to_cpu(pi->ref_tag) == virt)
					pi->ref_tag = cpu_to_be32(ref_tag);
				virt++;
				ref_tag++;
				p += tuple_sz;
			}
			kunmap_local(p);
		}

		bip->bip_flags |= BIP_MAPPED_INTEGRITY;
	}
}
16310c41dddSMax Gurtovoy 
/**
 * t10_pi_type1_complete - prepare PI prior returning request to the blk layer
 * @rq:              request with PI that should be prepared
 * @nr_bytes:        total bytes to prepare
 *
 * For Type 1/Type 2, the virtual start sector is the one that was
 * originally submitted by the block layer for the ref_tag usage. Due to
 * partitioning, MD/DM cloning, etc. the actual physical start sector is
 * likely to be different. Since the physical start sector was submitted
 * to the device, we should remap it back to virtual values expected by the
 * block layer.
 */
static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
{
	struct blk_integrity *bi = &rq->q->limits.integrity;
	/* Only remap the intervals actually completed by the device. */
	unsigned intervals = nr_bytes >> bi->interval_exp;
	const int tuple_sz = bi->tuple_size;
	u32 ref_tag = t10_pi_ref_tag(rq);
	u8 offset = bi->pi_offset;
	struct bio *bio;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		u32 virt = bip_get_seed(bip) & 0xffffffff;
		struct bio_vec iv;
		struct bvec_iter iter;

		bip_for_each_vec(iv, bip, iter) {
			unsigned int j;
			void *p;

			p = bvec_kmap_local(&iv);
			for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {
				struct t10_pi_tuple *pi = p + offset;

				/* Inverse of t10_pi_type1_prepare(): physical -> virtual. */
				if (be32_to_cpu(pi->ref_tag) == ref_tag)
					pi->ref_tag = cpu_to_be32(virt);
				virt++;
				ref_tag++;
				intervals--;
				p += tuple_sz;
			}
			kunmap_local(p);
		}
	}
}
21054d4e6abSMax Gurtovoy 
/* Chainable CRC64 guard-tag helper; returns the result big-endian. */
static __be64 ext_pi_crc64(u64 crc, void *data, unsigned int len)
{
	return cpu_to_be64(crc64_rocksoft_update(crc, data, len));
}
215a7d4383fSKeith Busch 
/*
 * Generate one extended (CRC64) PI tuple per protection interval: 64-bit
 * guard tag, 16-bit app tag, 48-bit ref tag.
 */
static void ext_pi_crc64_generate(struct blk_integrity_iter *iter,
		struct blk_integrity *bi)
{
	u8 offset = bi->pi_offset;
	unsigned int i;

	for (i = 0 ; i < iter->data_size ; i += iter->interval) {
		struct crc64_pi_tuple *pi = iter->prot_buf + offset;

		pi->guard_tag = ext_pi_crc64(0, iter->data_buf, iter->interval);
		/* Metadata preceding the tuple is covered by the guard tag too. */
		if (offset)
			pi->guard_tag = ext_pi_crc64(be64_to_cpu(pi->guard_tag),
					iter->prot_buf, offset);
		pi->app_tag = 0;

		/* The ref tag is a 48-bit big-endian field, hence unaligned put. */
		if (bi->flags & BLK_INTEGRITY_REF_TAG)
			put_unaligned_be48(iter->seed, pi->ref_tag);
		else
			put_unaligned_be48(0ULL, pi->ref_tag);

		iter->data_buf += iter->interval;
		iter->prot_buf += bi->tuple_size;
		iter->seed++;
	}
}
241a7d4383fSKeith Busch 
/* Return true if @ref_tag holds the 48-bit escape value (all ones). */
static bool ext_pi_ref_escape(const u8 ref_tag[6])
{
	unsigned int i;

	for (i = 0; i < 6; i++) {
		if (ref_tag[i] != 0xff)
			return false;
	}
	return true;
}
248a7d4383fSKeith Busch 
/*
 * Verify the 48-bit ref tags and 64-bit CRC guard tags of every
 * protection interval.  Returns BLK_STS_PROTECTION on the first mismatch.
 */
static blk_status_t ext_pi_crc64_verify(struct blk_integrity_iter *iter,
		struct blk_integrity *bi)
{
	u8 offset = bi->pi_offset;
	unsigned int i;

	for (i = 0; i < iter->data_size; i += iter->interval) {
		struct crc64_pi_tuple *pi = iter->prot_buf + offset;
		u64 ref, seed;
		__be64 csum;

		if (bi->flags & BLK_INTEGRITY_REF_TAG) {
			/* App tag escape disables checking for this interval. */
			if (pi->app_tag == T10_PI_APP_ESCAPE)
				goto next;

			ref = get_unaligned_be48(pi->ref_tag);
			seed = lower_48_bits(iter->seed);
			if (ref != seed) {
				pr_err("%s: ref tag error at location %llu (rcvd %llu)\n",
					iter->disk_name, seed, ref);
				return BLK_STS_PROTECTION;
			}
		} else {
			/*
			 * Without ref tags, checking is disabled only when
			 * both app tag and ref tag carry the escape value.
			 */
			if (pi->app_tag == T10_PI_APP_ESCAPE &&
			    ext_pi_ref_escape(pi->ref_tag))
				goto next;
		}

		csum = ext_pi_crc64(0, iter->data_buf, iter->interval);
		/* Metadata before the tuple is covered by the guard tag too. */
		if (offset)
			csum = ext_pi_crc64(be64_to_cpu(csum), iter->prot_buf,
					    offset);

		if (pi->guard_tag != csum) {
			pr_err("%s: guard tag error at sector %llu " \
			       "(rcvd %016llx, want %016llx)\n",
				iter->disk_name, (unsigned long long)iter->seed,
				be64_to_cpu(pi->guard_tag), be64_to_cpu(csum));
			return BLK_STS_PROTECTION;
		}

next:
		iter->data_buf += iter->interval;
		iter->prot_buf += bi->tuple_size;
		iter->seed++;
	}

	return BLK_STS_OK;
}
298a7d4383fSKeith Busch 
/*
 * Remap 48-bit ref tags from the submitter's virtual start sector to the
 * physical LBA before the request goes to the device.  The extended-PI
 * counterpart of t10_pi_type1_prepare().
 */
static void ext_pi_type1_prepare(struct request *rq)
{
	struct blk_integrity *bi = &rq->q->limits.integrity;
	const int tuple_sz = bi->tuple_size;
	u64 ref_tag = ext_pi_ref_tag(rq);
	u8 offset = bi->pi_offset;
	struct bio *bio;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		/* Virtual start sector, truncated to the 48-bit ref tag space. */
		u64 virt = lower_48_bits(bip_get_seed(bip));
		struct bio_vec iv;
		struct bvec_iter iter;

		/* Already remapped? */
		if (bip->bip_flags & BIP_MAPPED_INTEGRITY)
			break;

		bip_for_each_vec(iv, bip, iter) {
			unsigned int j;
			void *p;

			p = bvec_kmap_local(&iv);
			for (j = 0; j < iv.bv_len; j += tuple_sz) {
				struct crc64_pi_tuple *pi = p + offset;
				u64 ref = get_unaligned_be48(pi->ref_tag);

				/* Only remap tuples still carrying the virtual tag. */
				if (ref == virt)
					put_unaligned_be48(ref_tag, pi->ref_tag);
				virt++;
				ref_tag++;
				p += tuple_sz;
			}
			kunmap_local(p);
		}

		bip->bip_flags |= BIP_MAPPED_INTEGRITY;
	}
}
338a7d4383fSKeith Busch 
/*
 * Remap 48-bit ref tags back from physical to virtual values on request
 * completion.  The extended-PI counterpart of t10_pi_type1_complete().
 */
static void ext_pi_type1_complete(struct request *rq, unsigned int nr_bytes)
{
	struct blk_integrity *bi = &rq->q->limits.integrity;
	/* Only remap the intervals actually completed by the device. */
	unsigned intervals = nr_bytes >> bi->interval_exp;
	const int tuple_sz = bi->tuple_size;
	u64 ref_tag = ext_pi_ref_tag(rq);
	u8 offset = bi->pi_offset;
	struct bio *bio;

	__rq_for_each_bio(bio, rq) {
		struct bio_integrity_payload *bip = bio_integrity(bio);
		u64 virt = lower_48_bits(bip_get_seed(bip));
		struct bio_vec iv;
		struct bvec_iter iter;

		bip_for_each_vec(iv, bip, iter) {
			unsigned int j;
			void *p;

			p = bvec_kmap_local(&iv);
			for (j = 0; j < iv.bv_len && intervals; j += tuple_sz) {
				struct crc64_pi_tuple *pi = p + offset;
				u64 ref = get_unaligned_be48(pi->ref_tag);

				/* Inverse of ext_pi_type1_prepare(). */
				if (ref == ref_tag)
					put_unaligned_be48(virt, pi->ref_tag);
				virt++;
				ref_tag++;
				intervals--;
				p += tuple_sz;
			}
			kunmap_local(p);
		}
	}
}
374a7d4383fSKeith Busch 
/*
 * Generate protection information for all data segments of @bio, writing
 * it into the bio's integrity payload.  Dispatches on the profile's
 * checksum type; unknown types generate nothing.
 */
void blk_integrity_generate(struct bio *bio)
{
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct blk_integrity_iter iter;
	struct bvec_iter bviter;
	struct bio_vec bv;

	iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
	iter.interval = 1 << bi->interval_exp;
	iter.seed = bio->bi_iter.bi_sector;
	iter.prot_buf = bvec_virt(bip->bip_vec);
	bio_for_each_segment(bv, bio, bviter) {
		void *kaddr = bvec_kmap_local(&bv);

		iter.data_buf = kaddr;
		iter.data_size = bv.bv_len;
		switch (bi->csum_type) {
		case BLK_INTEGRITY_CSUM_CRC64:
			ext_pi_crc64_generate(&iter, bi);
			break;
		case BLK_INTEGRITY_CSUM_CRC:
		case BLK_INTEGRITY_CSUM_IP:
			t10_pi_generate(&iter, bi);
			break;
		default:
			break;
		}
		kunmap_local(kaddr);
	}
}
406a7d4383fSKeith Busch 
/*
 * Verify the protection information of all data segments of @bio on
 * completion.  On the first failing interval, bio->bi_status is set to
 * the error and verification stops.
 */
void blk_integrity_verify(struct bio *bio)
{
	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct blk_integrity_iter iter;
	struct bvec_iter bviter;
	struct bio_vec bv;

	/*
	 * At the moment verify is called bi_iter has been advanced during split
	 * and completion, so use the copy created during submission here.
	 */
	iter.disk_name = bio->bi_bdev->bd_disk->disk_name;
	iter.interval = 1 << bi->interval_exp;
	iter.seed = bip->bio_iter.bi_sector;
	iter.prot_buf = bvec_virt(bip->bip_vec);
	__bio_for_each_segment(bv, bio, bviter, bip->bio_iter) {
		void *kaddr = bvec_kmap_local(&bv);
		blk_status_t ret = BLK_STS_OK;

		iter.data_buf = kaddr;
		iter.data_size = bv.bv_len;
		switch (bi->csum_type) {
		case BLK_INTEGRITY_CSUM_CRC64:
			ret = ext_pi_crc64_verify(&iter, bi);
			break;
		case BLK_INTEGRITY_CSUM_CRC:
		case BLK_INTEGRITY_CSUM_IP:
			ret = t10_pi_verify(&iter, bi);
			break;
		default:
			break;
		}
		kunmap_local(kaddr);

		if (ret) {
			bio->bi_status = ret;
			return;
		}
	}
}
448a7d4383fSKeith Busch 
blk_integrity_prepare(struct request * rq)449e9f5f44aSChristoph Hellwig void blk_integrity_prepare(struct request *rq)
450e9f5f44aSChristoph Hellwig {
451c6e56cf6SChristoph Hellwig 	struct blk_integrity *bi = &rq->q->limits.integrity;
452a7d4383fSKeith Busch 
453e9f5f44aSChristoph Hellwig 	if (!(bi->flags & BLK_INTEGRITY_REF_TAG))
454e9f5f44aSChristoph Hellwig 		return;
455e9f5f44aSChristoph Hellwig 
456e9f5f44aSChristoph Hellwig 	if (bi->csum_type == BLK_INTEGRITY_CSUM_CRC64)
457e9f5f44aSChristoph Hellwig 		ext_pi_type1_prepare(rq);
458e9f5f44aSChristoph Hellwig 	else
459e9f5f44aSChristoph Hellwig 		t10_pi_type1_prepare(rq);
460e9f5f44aSChristoph Hellwig }
461e9f5f44aSChristoph Hellwig 
blk_integrity_complete(struct request * rq,unsigned int nr_bytes)462e9f5f44aSChristoph Hellwig void blk_integrity_complete(struct request *rq, unsigned int nr_bytes)
463e9f5f44aSChristoph Hellwig {
464c6e56cf6SChristoph Hellwig 	struct blk_integrity *bi = &rq->q->limits.integrity;
465e9f5f44aSChristoph Hellwig 
466e9f5f44aSChristoph Hellwig 	if (!(bi->flags & BLK_INTEGRITY_REF_TAG))
467e9f5f44aSChristoph Hellwig 		return;
468e9f5f44aSChristoph Hellwig 
469e9f5f44aSChristoph Hellwig 	if (bi->csum_type == BLK_INTEGRITY_CSUM_CRC64)
470e9f5f44aSChristoph Hellwig 		ext_pi_type1_complete(rq, nr_bytes);
471e9f5f44aSChristoph Hellwig 	else
472e9f5f44aSChristoph Hellwig 		t10_pi_type1_complete(rq, nr_bytes);
473e9f5f44aSChristoph Hellwig }
474