xref: /linux/block/blk-integrity.c (revision 7942b226e6b84df13b46b76c01d3b6e07a1b349e)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * blk-integrity.c - Block layer data integrity extensions
4  *
5  * Copyright (C) 2007, 2008 Oracle Corporation
6  * Written by: Martin K. Petersen <martin.petersen@oracle.com>
7  */
8 
9 #include <linux/blk-integrity.h>
10 #include <linux/backing-dev.h>
11 #include <linux/mempool.h>
12 #include <linux/bio.h>
13 #include <linux/scatterlist.h>
14 #include <linux/export.h>
15 #include <linux/slab.h>
16 #include <linux/t10-pi.h>
17 
18 #include "blk.h"
19 
/**
 * blk_rq_count_integrity_sg - Count number of integrity scatterlist elements
 * @q:		request queue
 * @bio:	bio with integrity metadata attached
 *
 * Description: Returns the number of elements required in a
 * scatterlist corresponding to the integrity metadata in a bio.
 */
int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
{
	struct bio_vec iv, ivprv = { NULL };
	unsigned int segments = 0;
	unsigned int seg_size = 0;	/* bytes accumulated in current segment */
	struct bvec_iter iter;
	int prev = 0;			/* set once ivprv holds a valid bvec */

	bio_for_each_integrity_vec(iv, bio, iter) {

		if (prev) {
			/*
			 * Try to coalesce this bvec into the current segment:
			 * it must be physically contiguous/mergeable with the
			 * previous one and must not push the segment past the
			 * queue's max segment size.
			 */
			if (!biovec_phys_mergeable(q, &ivprv, &iv))
				goto new_segment;
			if (seg_size + iv.bv_len > queue_max_segment_size(q))
				goto new_segment;

			seg_size += iv.bv_len;
		} else {
new_segment:
			/*
			 * NOTE: the gotos above deliberately jump into this
			 * else branch (common kernel idiom) to start a fresh
			 * segment when merging fails.
			 */
			segments++;
			seg_size = iv.bv_len;
		}

		prev = 1;
		ivprv = iv;
	}

	return segments;
}
57 
58 int blk_get_meta_cap(struct block_device *bdev, unsigned int cmd,
59 		     struct logical_block_metadata_cap __user *argp)
60 {
61 	struct blk_integrity *bi = blk_get_integrity(bdev->bd_disk);
62 	struct logical_block_metadata_cap meta_cap = {};
63 	size_t usize = _IOC_SIZE(cmd);
64 
65 	if (_IOC_DIR(cmd)  != _IOC_DIR(FS_IOC_GETLBMD_CAP) ||
66 	    _IOC_TYPE(cmd) != _IOC_TYPE(FS_IOC_GETLBMD_CAP) ||
67 	    _IOC_NR(cmd)   != _IOC_NR(FS_IOC_GETLBMD_CAP) ||
68 	    _IOC_SIZE(cmd) < LBMD_SIZE_VER0)
69 		return -ENOIOCTLCMD;
70 
71 	if (!bi)
72 		goto out;
73 
74 	if (bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE)
75 		meta_cap.lbmd_flags |= LBMD_PI_CAP_INTEGRITY;
76 	if (bi->flags & BLK_INTEGRITY_REF_TAG)
77 		meta_cap.lbmd_flags |= LBMD_PI_CAP_REFTAG;
78 	meta_cap.lbmd_interval = 1 << bi->interval_exp;
79 	meta_cap.lbmd_size = bi->metadata_size;
80 	meta_cap.lbmd_pi_size = bi->pi_tuple_size;
81 	meta_cap.lbmd_pi_offset = bi->pi_offset;
82 	meta_cap.lbmd_opaque_size = bi->metadata_size - bi->pi_tuple_size;
83 	if (meta_cap.lbmd_opaque_size && !bi->pi_offset)
84 		meta_cap.lbmd_opaque_offset = bi->pi_tuple_size;
85 
86 	switch (bi->csum_type) {
87 	case BLK_INTEGRITY_CSUM_NONE:
88 		meta_cap.lbmd_guard_tag_type = LBMD_PI_CSUM_NONE;
89 		break;
90 	case BLK_INTEGRITY_CSUM_IP:
91 		meta_cap.lbmd_guard_tag_type = LBMD_PI_CSUM_IP;
92 		break;
93 	case BLK_INTEGRITY_CSUM_CRC:
94 		meta_cap.lbmd_guard_tag_type = LBMD_PI_CSUM_CRC16_T10DIF;
95 		break;
96 	case BLK_INTEGRITY_CSUM_CRC64:
97 		meta_cap.lbmd_guard_tag_type = LBMD_PI_CSUM_CRC64_NVME;
98 		break;
99 	}
100 
101 	if (bi->csum_type != BLK_INTEGRITY_CSUM_NONE)
102 		meta_cap.lbmd_app_tag_size = 2;
103 
104 	if (bi->flags & BLK_INTEGRITY_REF_TAG) {
105 		switch (bi->csum_type) {
106 		case BLK_INTEGRITY_CSUM_CRC64:
107 			meta_cap.lbmd_ref_tag_size =
108 				sizeof_field(struct crc64_pi_tuple, ref_tag);
109 			break;
110 		case BLK_INTEGRITY_CSUM_CRC:
111 		case BLK_INTEGRITY_CSUM_IP:
112 			meta_cap.lbmd_ref_tag_size =
113 				sizeof_field(struct t10_pi_tuple, ref_tag);
114 			break;
115 		default:
116 			break;
117 		}
118 	}
119 
120 out:
121 	return copy_struct_to_user(argp, usize, &meta_cap, sizeof(meta_cap),
122 				   NULL);
123 }
124 
125 int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
126 			      ssize_t bytes)
127 {
128 	int ret;
129 	struct iov_iter iter;
130 
131 	iov_iter_ubuf(&iter, rq_data_dir(rq), ubuf, bytes);
132 	ret = bio_integrity_map_user(rq->bio, &iter);
133 	if (ret)
134 		return ret;
135 
136 	rq->nr_integrity_segments = blk_rq_count_integrity_sg(rq->q, rq->bio);
137 	rq->cmd_flags |= REQ_INTEGRITY;
138 	return 0;
139 }
140 EXPORT_SYMBOL_GPL(blk_rq_integrity_map_user);
141 
142 bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
143 			    struct request *next)
144 {
145 	if (blk_integrity_rq(req) == 0 && blk_integrity_rq(next) == 0)
146 		return true;
147 
148 	if (blk_integrity_rq(req) == 0 || blk_integrity_rq(next) == 0)
149 		return false;
150 
151 	if (bio_integrity(req->bio)->bip_flags !=
152 	    bio_integrity(next->bio)->bip_flags)
153 		return false;
154 
155 	if (req->nr_integrity_segments + next->nr_integrity_segments >
156 	    q->limits.max_integrity_segments)
157 		return false;
158 
159 	if (integrity_req_gap_back_merge(req, next->bio))
160 		return false;
161 
162 	return true;
163 }
164 
165 bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
166 			     struct bio *bio)
167 {
168 	int nr_integrity_segs;
169 
170 	if (blk_integrity_rq(req) == 0 && bio_integrity(bio) == NULL)
171 		return true;
172 
173 	if (blk_integrity_rq(req) == 0 || bio_integrity(bio) == NULL)
174 		return false;
175 
176 	if (bio_integrity(req->bio)->bip_flags != bio_integrity(bio)->bip_flags)
177 		return false;
178 
179 	nr_integrity_segs = blk_rq_count_integrity_sg(q, bio);
180 	if (req->nr_integrity_segments + nr_integrity_segs >
181 	    q->limits.max_integrity_segments)
182 		return false;
183 
184 	return true;
185 }
186 
187 static inline struct blk_integrity *dev_to_bi(struct device *dev)
188 {
189 	return &dev_to_disk(dev)->queue->limits.integrity;
190 }
191 
192 const char *blk_integrity_profile_name(struct blk_integrity *bi)
193 {
194 	switch (bi->csum_type) {
195 	case BLK_INTEGRITY_CSUM_IP:
196 		if (bi->flags & BLK_INTEGRITY_REF_TAG)
197 			return "T10-DIF-TYPE1-IP";
198 		return "T10-DIF-TYPE3-IP";
199 	case BLK_INTEGRITY_CSUM_CRC:
200 		if (bi->flags & BLK_INTEGRITY_REF_TAG)
201 			return "T10-DIF-TYPE1-CRC";
202 		return "T10-DIF-TYPE3-CRC";
203 	case BLK_INTEGRITY_CSUM_CRC64:
204 		if (bi->flags & BLK_INTEGRITY_REF_TAG)
205 			return "EXT-DIF-TYPE1-CRC64";
206 		return "EXT-DIF-TYPE3-CRC64";
207 	case BLK_INTEGRITY_CSUM_NONE:
208 		break;
209 	}
210 
211 	return "nop";
212 }
213 EXPORT_SYMBOL_GPL(blk_integrity_profile_name);
214 
215 static ssize_t flag_store(struct device *dev, const char *page, size_t count,
216 		unsigned char flag)
217 {
218 	struct request_queue *q = dev_to_disk(dev)->queue;
219 	struct queue_limits lim;
220 	unsigned long val;
221 	int err;
222 
223 	err = kstrtoul(page, 10, &val);
224 	if (err)
225 		return err;
226 
227 	/* note that the flags are inverted vs the values in the sysfs files */
228 	lim = queue_limits_start_update(q);
229 	if (val)
230 		lim.integrity.flags &= ~flag;
231 	else
232 		lim.integrity.flags |= flag;
233 
234 	err = queue_limits_commit_update_frozen(q, &lim);
235 	if (err)
236 		return err;
237 	return count;
238 }
239 
240 static ssize_t flag_show(struct device *dev, char *page, unsigned char flag)
241 {
242 	struct blk_integrity *bi = dev_to_bi(dev);
243 
244 	return sysfs_emit(page, "%d\n", !(bi->flags & flag));
245 }
246 
247 static ssize_t format_show(struct device *dev, struct device_attribute *attr,
248 			   char *page)
249 {
250 	struct blk_integrity *bi = dev_to_bi(dev);
251 
252 	if (!bi->metadata_size)
253 		return sysfs_emit(page, "none\n");
254 	return sysfs_emit(page, "%s\n", blk_integrity_profile_name(bi));
255 }
256 
257 static ssize_t tag_size_show(struct device *dev, struct device_attribute *attr,
258 			     char *page)
259 {
260 	struct blk_integrity *bi = dev_to_bi(dev);
261 
262 	return sysfs_emit(page, "%u\n", bi->tag_size);
263 }
264 
265 static ssize_t protection_interval_bytes_show(struct device *dev,
266 					      struct device_attribute *attr,
267 					      char *page)
268 {
269 	struct blk_integrity *bi = dev_to_bi(dev);
270 
271 	return sysfs_emit(page, "%u\n",
272 			  bi->interval_exp ? 1 << bi->interval_exp : 0);
273 }
274 
/* sysfs "read_verify": writing 1 enables verification (clears NOVERIFY). */
static ssize_t read_verify_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *page, size_t count)
{
	return flag_store(dev, page, count, BLK_INTEGRITY_NOVERIFY);
}

/* sysfs "read_verify": shows 1 when NOVERIFY is clear. */
static ssize_t read_verify_show(struct device *dev,
				struct device_attribute *attr, char *page)
{
	return flag_show(dev, page, BLK_INTEGRITY_NOVERIFY);
}

/* sysfs "write_generate": writing 1 enables generation (clears NOGENERATE). */
static ssize_t write_generate_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *page, size_t count)
{
	return flag_store(dev, page, count, BLK_INTEGRITY_NOGENERATE);
}

/* sysfs "write_generate": shows 1 when NOGENERATE is clear. */
static ssize_t write_generate_show(struct device *dev,
				   struct device_attribute *attr, char *page)
{
	return flag_show(dev, page, BLK_INTEGRITY_NOGENERATE);
}
300 
301 static ssize_t device_is_integrity_capable_show(struct device *dev,
302 						struct device_attribute *attr,
303 						char *page)
304 {
305 	struct blk_integrity *bi = dev_to_bi(dev);
306 
307 	return sysfs_emit(page, "%u\n",
308 			  !!(bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE));
309 }
310 
/* Attributes exposed under /sys/block/<disk>/integrity/ */
static DEVICE_ATTR_RO(format);
static DEVICE_ATTR_RO(tag_size);
static DEVICE_ATTR_RO(protection_interval_bytes);
static DEVICE_ATTR_RW(read_verify);
static DEVICE_ATTR_RW(write_generate);
static DEVICE_ATTR_RO(device_is_integrity_capable);

static struct attribute *integrity_attrs[] = {
	&dev_attr_format.attr,
	&dev_attr_tag_size.attr,
	&dev_attr_protection_interval_bytes.attr,
	&dev_attr_read_verify.attr,
	&dev_attr_write_generate.attr,
	&dev_attr_device_is_integrity_capable.attr,
	NULL
};

/* Registered by the genhd code to create the "integrity" sysfs directory. */
const struct attribute_group blk_integrity_attr_group = {
	.name = "integrity",
	.attrs = integrity_attrs,
};
332