xref: /linux/block/blk-integrity.c (revision 30bbcb44707a97fcb62246bebc8b413b5ab293f8)
// SPDX-License-Identifier: GPL-2.0
/*
 * blk-integrity.c - Block layer data integrity extensions
 *
 * Copyright (C) 2007, 2008 Oracle Corporation
 * Written by: Martin K. Petersen <martin.petersen@oracle.com>
 */

#include <linux/blk-integrity.h>
#include <linux/backing-dev.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/scatterlist.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/t10-pi.h>

#include "blk.h"

/**
 * blk_rq_count_integrity_sg - Count number of integrity scatterlist elements
 * @q:		request queue
 * @bio:	bio with integrity metadata attached
 *
 * Description: Returns the number of elements required in a
 * scatterlist corresponding to the integrity metadata in a bio.
 */
int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
{
	struct bio_vec iv, ivprv = { NULL };
	unsigned int segments = 0;
	unsigned int seg_size = 0;
	struct bvec_iter iter;
	int prev = 0;

	bio_for_each_integrity_vec(iv, bio, iter) {

		if (prev) {
			if (!biovec_phys_mergeable(q, &ivprv, &iv))
				goto new_segment;
			if (seg_size + iv.bv_len > queue_max_segment_size(q))
				goto new_segment;

			seg_size += iv.bv_len;
		} else {
new_segment:
			segments++;
			seg_size = iv.bv_len;
		}

		prev = 1;
		ivprv = iv;
	}

	return segments;
}

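/*
 * Example (illustrative sketch, not a caller in this file; nsegs and
 * prot_sg are hypothetical names): a driver that wants to DMA-map a
 * request's integrity metadata could size its scatterlist with the
 * helper above before mapping:
 *
 *	nsegs = blk_rq_count_integrity_sg(q, rq->bio);
 *	prot_sg = kmalloc_array(nsegs, sizeof(*prot_sg), GFP_ATOMIC);
 */
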
int blk_get_meta_cap(struct block_device *bdev, unsigned int cmd,
		     struct logical_block_metadata_cap __user *argp)
{
	struct blk_integrity *bi;
	struct logical_block_metadata_cap meta_cap = {};
	size_t usize = _IOC_SIZE(cmd);

	if (!extensible_ioctl_valid(cmd, FS_IOC_GETLBMD_CAP, LBMD_SIZE_VER0))
		return -ENOIOCTLCMD;

	bi = blk_get_integrity(bdev->bd_disk);
	if (!bi)
		goto out;

	if (bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE)
		meta_cap.lbmd_flags |= LBMD_PI_CAP_INTEGRITY;
	if (bi->flags & BLK_INTEGRITY_REF_TAG)
		meta_cap.lbmd_flags |= LBMD_PI_CAP_REFTAG;
	meta_cap.lbmd_interval = 1 << bi->interval_exp;
	meta_cap.lbmd_size = bi->metadata_size;
	meta_cap.lbmd_pi_size = bi->pi_tuple_size;
	meta_cap.lbmd_pi_offset = bi->pi_offset;
	meta_cap.lbmd_opaque_size = bi->metadata_size - bi->pi_tuple_size;
	if (meta_cap.lbmd_opaque_size && !bi->pi_offset)
		meta_cap.lbmd_opaque_offset = bi->pi_tuple_size;

	switch (bi->csum_type) {
	case BLK_INTEGRITY_CSUM_NONE:
		meta_cap.lbmd_guard_tag_type = LBMD_PI_CSUM_NONE;
		break;
	case BLK_INTEGRITY_CSUM_IP:
		meta_cap.lbmd_guard_tag_type = LBMD_PI_CSUM_IP;
		break;
	case BLK_INTEGRITY_CSUM_CRC:
		meta_cap.lbmd_guard_tag_type = LBMD_PI_CSUM_CRC16_T10DIF;
		break;
	case BLK_INTEGRITY_CSUM_CRC64:
		meta_cap.lbmd_guard_tag_type = LBMD_PI_CSUM_CRC64_NVME;
		break;
	}

	if (bi->csum_type != BLK_INTEGRITY_CSUM_NONE)
		meta_cap.lbmd_app_tag_size = 2;

	if (bi->flags & BLK_INTEGRITY_REF_TAG) {
		switch (bi->csum_type) {
		case BLK_INTEGRITY_CSUM_CRC64:
			meta_cap.lbmd_ref_tag_size =
				sizeof_field(struct crc64_pi_tuple, ref_tag);
			break;
		case BLK_INTEGRITY_CSUM_CRC:
		case BLK_INTEGRITY_CSUM_IP:
			meta_cap.lbmd_ref_tag_size =
				sizeof_field(struct t10_pi_tuple, ref_tag);
			break;
		default:
			break;
		}
	}

out:
	return copy_struct_to_user(argp, usize, &meta_cap, sizeof(meta_cap),
				   NULL);
}
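
/*
 * Userspace reaches the helper above through the FS_IOC_GETLBMD_CAP
 * ioctl.  A hypothetical caller (fd and cap are illustrative names)
 * might probe a device like:
 *
 *	struct logical_block_metadata_cap cap = {};
 *
 *	if (ioctl(fd, FS_IOC_GETLBMD_CAP, &cap) == 0 &&
 *	    (cap.lbmd_flags & LBMD_PI_CAP_INTEGRITY))
 *		printf("PI interval: %u bytes\n", cap.lbmd_interval);
 */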

/**
 * blk_rq_map_integrity_sg - Map integrity metadata into a scatterlist
 * @rq:		request to map
 * @sglist:	target scatterlist
 *
 * Description: Map the integrity vectors in the request into a
 * scatterlist.  The scatterlist must be big enough to hold all
 * elements, i.e. sized using blk_rq_count_integrity_sg() or
 * rq->nr_integrity_segments.
 */
int blk_rq_map_integrity_sg(struct request *rq, struct scatterlist *sglist)
{
	struct bio_vec iv, ivprv = { NULL };
	struct request_queue *q = rq->q;
	struct scatterlist *sg = NULL;
	struct bio *bio = rq->bio;
	unsigned int segments = 0;
	struct bvec_iter iter;
	int prev = 0;

	bio_for_each_integrity_vec(iv, bio, iter) {
		if (prev) {
			if (!biovec_phys_mergeable(q, &ivprv, &iv))
				goto new_segment;
			if (sg->length + iv.bv_len > queue_max_segment_size(q))
				goto new_segment;

			sg->length += iv.bv_len;
		} else {
new_segment:
			if (!sg)
				sg = sglist;
			else {
				sg_unmark_end(sg);
				sg = sg_next(sg);
			}

			sg_set_page(sg, iv.bv_page, iv.bv_len, iv.bv_offset);
			segments++;
		}

		prev = 1;
		ivprv = iv;
	}

	if (sg)
		sg_mark_end(sg);

	/*
	 * Something must be wrong if the computed number of segments is
	 * bigger than the request's number of physical integrity segments.
	 */
	BUG_ON(segments > rq->nr_integrity_segments);
	BUG_ON(segments > queue_max_integrity_segments(q));
	return segments;
}
EXPORT_SYMBOL(blk_rq_map_integrity_sg);
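
/*
 * Example (illustrative, not a caller in this file; prot_sg and nsegs
 * are hypothetical names): a low-level driver that has allocated a
 * scatterlist large enough for rq->nr_integrity_segments entries might
 * map it like:
 *
 *	sg_init_table(prot_sg, rq->nr_integrity_segments);
 *	nsegs = blk_rq_map_integrity_sg(rq, prot_sg);
 */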

int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
			      ssize_t bytes)
{
	int ret;
	struct iov_iter iter;

	iov_iter_ubuf(&iter, rq_data_dir(rq), ubuf, bytes);
	ret = bio_integrity_map_user(rq->bio, &iter);
	if (ret)
		return ret;

	rq->nr_integrity_segments = blk_rq_count_integrity_sg(rq->q, rq->bio);
	rq->cmd_flags |= REQ_INTEGRITY;
	return 0;
}
EXPORT_SYMBOL_GPL(blk_rq_integrity_map_user);
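
/*
 * Example (illustrative; meta_ubuf, meta_len and the error label are
 * hypothetical): a passthrough path that received a user pointer to PI
 * metadata could attach it to an already-mapped request:
 *
 *	ret = blk_rq_integrity_map_user(rq, meta_ubuf, meta_len);
 *	if (ret)
 *		goto out_unmap;
 */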
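/*
 * Check whether "next" can be merged into "req" without violating the
 * integrity limits: both requests must carry (or both lack) integrity
 * metadata with identical bip_flags, the combined segment count must
 * stay within max_integrity_segments, and the merge must not create an
 * integrity payload gap at the merge point.
 */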
bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
			    struct request *next)
{
	if (blk_integrity_rq(req) == 0 && blk_integrity_rq(next) == 0)
		return true;

	if (blk_integrity_rq(req) == 0 || blk_integrity_rq(next) == 0)
		return false;

	if (bio_integrity(req->bio)->bip_flags !=
	    bio_integrity(next->bio)->bip_flags)
		return false;

	if (req->nr_integrity_segments + next->nr_integrity_segments >
	    q->limits.max_integrity_segments)
		return false;

	if (integrity_req_gap_back_merge(req, next->bio))
		return false;

	return true;
}

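/*
 * As above, but checks whether a bio can be merged into an existing
 * request.  The bio's integrity segments are counted here because the
 * bio has not been accounted to any request yet.
 */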
bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
			     struct bio *bio)
{
	int nr_integrity_segs;

	if (blk_integrity_rq(req) == 0 && bio_integrity(bio) == NULL)
		return true;

	if (blk_integrity_rq(req) == 0 || bio_integrity(bio) == NULL)
		return false;

	if (bio_integrity(req->bio)->bip_flags != bio_integrity(bio)->bip_flags)
		return false;

	nr_integrity_segs = blk_rq_count_integrity_sg(q, bio);
	if (req->nr_integrity_segments + nr_integrity_segs >
	    q->limits.max_integrity_segments)
		return false;

	return true;
}

static inline struct blk_integrity *dev_to_bi(struct device *dev)
{
	return &dev_to_disk(dev)->queue->limits.integrity;
}

const char *blk_integrity_profile_name(struct blk_integrity *bi)
{
	switch (bi->csum_type) {
	case BLK_INTEGRITY_CSUM_IP:
		if (bi->flags & BLK_INTEGRITY_REF_TAG)
			return "T10-DIF-TYPE1-IP";
		return "T10-DIF-TYPE3-IP";
	case BLK_INTEGRITY_CSUM_CRC:
		if (bi->flags & BLK_INTEGRITY_REF_TAG)
			return "T10-DIF-TYPE1-CRC";
		return "T10-DIF-TYPE3-CRC";
	case BLK_INTEGRITY_CSUM_CRC64:
		if (bi->flags & BLK_INTEGRITY_REF_TAG)
			return "EXT-DIF-TYPE1-CRC64";
		return "EXT-DIF-TYPE3-CRC64";
	case BLK_INTEGRITY_CSUM_NONE:
		break;
	}

	return "nop";
}
EXPORT_SYMBOL_GPL(blk_integrity_profile_name);

static ssize_t flag_store(struct device *dev, const char *page, size_t count,
		unsigned char flag)
{
	struct request_queue *q = dev_to_disk(dev)->queue;
	struct queue_limits lim;
	unsigned long val;
	int err;

	err = kstrtoul(page, 10, &val);
	if (err)
		return err;

	/* note that the flags are inverted vs the values in the sysfs files */
	lim = queue_limits_start_update(q);
	if (val)
		lim.integrity.flags &= ~flag;
	else
		lim.integrity.flags |= flag;

	err = queue_limits_commit_update_frozen(q, &lim);
	if (err)
		return err;
	return count;
}

static ssize_t flag_show(struct device *dev, char *page, unsigned char flag)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%d\n", !(bi->flags & flag));
}
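
/*
 * The read_verify and write_generate attributes below map onto the
 * NOVERIFY and NOGENERATE flags via the inverted helpers above; e.g.
 * writing 0 to /sys/block/<disk>/integrity/read_verify sets
 * BLK_INTEGRITY_NOVERIFY.
 */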

static ssize_t format_show(struct device *dev, struct device_attribute *attr,
			   char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	if (!bi->metadata_size)
		return sysfs_emit(page, "none\n");
	return sysfs_emit(page, "%s\n", blk_integrity_profile_name(bi));
}

static ssize_t tag_size_show(struct device *dev, struct device_attribute *attr,
			     char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%u\n", bi->tag_size);
}

static ssize_t protection_interval_bytes_show(struct device *dev,
					      struct device_attribute *attr,
					      char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%u\n",
			  bi->interval_exp ? 1 << bi->interval_exp : 0);
}

static ssize_t read_verify_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *page, size_t count)
{
	return flag_store(dev, page, count, BLK_INTEGRITY_NOVERIFY);
}

static ssize_t read_verify_show(struct device *dev,
				struct device_attribute *attr, char *page)
{
	return flag_show(dev, page, BLK_INTEGRITY_NOVERIFY);
}

static ssize_t write_generate_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *page, size_t count)
{
	return flag_store(dev, page, count, BLK_INTEGRITY_NOGENERATE);
}

static ssize_t write_generate_show(struct device *dev,
				   struct device_attribute *attr, char *page)
{
	return flag_show(dev, page, BLK_INTEGRITY_NOGENERATE);
}

static ssize_t device_is_integrity_capable_show(struct device *dev,
						struct device_attribute *attr,
						char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%u\n",
			  !!(bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE));
}

static DEVICE_ATTR_RO(format);
static DEVICE_ATTR_RO(tag_size);
static DEVICE_ATTR_RO(protection_interval_bytes);
static DEVICE_ATTR_RW(read_verify);
static DEVICE_ATTR_RW(write_generate);
static DEVICE_ATTR_RO(device_is_integrity_capable);

static struct attribute *integrity_attrs[] = {
	&dev_attr_format.attr,
	&dev_attr_tag_size.attr,
	&dev_attr_protection_interval_bytes.attr,
	&dev_attr_read_verify.attr,
	&dev_attr_write_generate.attr,
	&dev_attr_device_is_integrity_capable.attr,
	NULL
};

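/* These attributes are exposed under /sys/block/<disk>/integrity/. */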
const struct attribute_group blk_integrity_attr_group = {
	.name = "integrity",
	.attrs = integrity_attrs,
};