// SPDX-License-Identifier: GPL-2.0
/*
 * blk-integrity.c - Block layer data integrity extensions
 *
 * Copyright (C) 2007, 2008 Oracle Corporation
 * Written by: Martin K. Petersen <martin.petersen@oracle.com>
 */

#include <linux/blk-integrity.h>
#include <linux/backing-dev.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/scatterlist.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/t10-pi.h>

#include "blk.h"

/**
 * blk_rq_count_integrity_sg - Count number of integrity scatterlist elements
 * @q:		request queue
 * @bio:	bio with integrity metadata attached
 *
 * Description: Returns the number of elements required in a
 * scatterlist corresponding to the integrity metadata in a bio.
 */
int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
{
	struct bio_vec iv, ivprv = { NULL };
	unsigned int segments = 0;
	unsigned int seg_size = 0;
	struct bvec_iter iter;
	int prev = 0;

	bio_for_each_integrity_vec(iv, bio, iter) {

		if (prev) {
			if (!biovec_phys_mergeable(q, &ivprv, &iv))
				goto new_segment;
			if (seg_size + iv.bv_len > queue_max_segment_size(q))
				goto new_segment;

			seg_size += iv.bv_len;
		} else {
new_segment:
			segments++;
			seg_size = iv.bv_len;
		}

		prev = 1;
		ivprv = iv;
	}

	return segments;
}

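/**
 * blk_get_meta_cap - Report metadata/PI capabilities of a block device
 * @bdev:	block device to query
 * @cmd:	ioctl command, expected to be FS_IOC_GETLBMD_CAP
 * @argp:	userspace destination for the capability structure
 *
 * Description: Fills in a &struct logical_block_metadata_cap with the
 * integrity profile of @bdev (capability flags, protection interval,
 * metadata and PI tuple sizes, tag layout) and copies it to userspace,
 * honouring the structure size encoded in @cmd. An all-zero structure
 * is returned when the disk has no integrity profile.
 */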
int blk_get_meta_cap(struct block_device *bdev, unsigned int cmd,
		     struct logical_block_metadata_cap __user *argp)
{
	struct blk_integrity *bi;
	struct logical_block_metadata_cap meta_cap = {};
	size_t usize = _IOC_SIZE(cmd);

	if (!extensible_ioctl_valid(cmd, FS_IOC_GETLBMD_CAP, LBMD_SIZE_VER0))
		return -ENOIOCTLCMD;

	bi = blk_get_integrity(bdev->bd_disk);
	if (!bi)
		goto out;

	if (bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE)
		meta_cap.lbmd_flags |= LBMD_PI_CAP_INTEGRITY;
	if (bi->flags & BLK_INTEGRITY_REF_TAG)
		meta_cap.lbmd_flags |= LBMD_PI_CAP_REFTAG;
	meta_cap.lbmd_interval = 1 << bi->interval_exp;
	meta_cap.lbmd_size = bi->metadata_size;
	meta_cap.lbmd_pi_size = bi->pi_tuple_size;
	meta_cap.lbmd_pi_offset = bi->pi_offset;
	meta_cap.lbmd_opaque_size = bi->metadata_size - bi->pi_tuple_size;
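	/*
	 * When the PI tuple sits at the start of the metadata buffer
	 * (pi_offset == 0), the opaque bytes follow it immediately;
	 * otherwise the opaque region leads and its offset stays 0.
	 */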
	if (meta_cap.lbmd_opaque_size && !bi->pi_offset)
		meta_cap.lbmd_opaque_offset = bi->pi_tuple_size;

	switch (bi->csum_type) {
	case BLK_INTEGRITY_CSUM_NONE:
		meta_cap.lbmd_guard_tag_type = LBMD_PI_CSUM_NONE;
		break;
	case BLK_INTEGRITY_CSUM_IP:
		meta_cap.lbmd_guard_tag_type = LBMD_PI_CSUM_IP;
		break;
	case BLK_INTEGRITY_CSUM_CRC:
		meta_cap.lbmd_guard_tag_type = LBMD_PI_CSUM_CRC16_T10DIF;
		break;
	case BLK_INTEGRITY_CSUM_CRC64:
		meta_cap.lbmd_guard_tag_type = LBMD_PI_CSUM_CRC64_NVME;
		break;
	}

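	/* both the T10 and NVMe 64-bit PI formats carry a 16-bit app tag */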
	if (bi->csum_type != BLK_INTEGRITY_CSUM_NONE)
		meta_cap.lbmd_app_tag_size = 2;

	if (bi->flags & BLK_INTEGRITY_REF_TAG) {
		switch (bi->csum_type) {
		case BLK_INTEGRITY_CSUM_CRC64:
			meta_cap.lbmd_ref_tag_size =
				sizeof_field(struct crc64_pi_tuple, ref_tag);
			break;
		case BLK_INTEGRITY_CSUM_CRC:
		case BLK_INTEGRITY_CSUM_IP:
			meta_cap.lbmd_ref_tag_size =
				sizeof_field(struct t10_pi_tuple, ref_tag);
			break;
		default:
			break;
		}
	}

out:
	return copy_struct_to_user(argp, usize, &meta_cap, sizeof(meta_cap),
				   NULL);
}

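/**
 * blk_rq_integrity_map_user - Attach user integrity metadata to a request
 * @rq:		request to receive the metadata
 * @ubuf:	userspace buffer holding the integrity payload
 * @bytes:	size of @ubuf in bytes
 *
 * Description: Maps the user buffer as the integrity payload of
 * @rq->bio, records the resulting segment count and flags the request
 * with REQ_INTEGRITY. Returns 0 on success or the error from the bio
 * mapping.
 */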
int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
			      ssize_t bytes)
{
	int ret;
	struct iov_iter iter;

	iov_iter_ubuf(&iter, rq_data_dir(rq), ubuf, bytes);
	ret = bio_integrity_map_user(rq->bio, &iter);
	if (ret)
		return ret;

	rq->nr_integrity_segments = blk_rq_count_integrity_sg(rq->q, rq->bio);
	rq->cmd_flags |= REQ_INTEGRITY;
	return 0;
}
EXPORT_SYMBOL_GPL(blk_rq_integrity_map_user);

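/**
 * blk_integrity_merge_rq - Check integrity limits for a request merge
 * @q:		request queue both requests belong to
 * @req:	request being grown by the merge
 * @next:	request to be merged into @req
 *
 * Description: A merge is allowed when both or neither request carries
 * integrity metadata, the payload flags match, the combined integrity
 * segment count stays within the queue limit and no virtual-boundary
 * gap opens between the two payloads.
 */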
bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
			    struct request *next)
{
	if (blk_integrity_rq(req) == 0 && blk_integrity_rq(next) == 0)
		return true;

	if (blk_integrity_rq(req) == 0 || blk_integrity_rq(next) == 0)
		return false;

	if (bio_integrity(req->bio)->bip_flags !=
	    bio_integrity(next->bio)->bip_flags)
		return false;

	if (req->nr_integrity_segments + next->nr_integrity_segments >
	    q->limits.max_integrity_segments)
		return false;

	if (integrity_req_gap_back_merge(req, next->bio))
		return false;

	return true;
}

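/**
 * blk_integrity_merge_bio - Check integrity limits for a bio merge
 * @q:		request queue of @req
 * @req:	request the bio would join
 * @bio:	bio being considered for the merge
 *
 * Description: Mirrors blk_integrity_merge_rq() for the request/bio
 * case: integrity presence and payload flags must match, and the
 * merged integrity segment count must not exceed the queue limit.
 */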
bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
			     struct bio *bio)
{
	int nr_integrity_segs;

	if (blk_integrity_rq(req) == 0 && bio_integrity(bio) == NULL)
		return true;

	if (blk_integrity_rq(req) == 0 || bio_integrity(bio) == NULL)
		return false;

	if (bio_integrity(req->bio)->bip_flags != bio_integrity(bio)->bip_flags)
		return false;

	nr_integrity_segs = blk_rq_count_integrity_sg(q, bio);
	if (req->nr_integrity_segments + nr_integrity_segs >
	    q->limits.max_integrity_segments)
		return false;

	return true;
}

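/* map a gendisk sysfs device to the integrity profile in its queue limits */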
static inline struct blk_integrity *dev_to_bi(struct device *dev)
{
	return &dev_to_disk(dev)->queue->limits.integrity;
}

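/**
 * blk_integrity_profile_name - Canonical name for an integrity profile
 * @bi:		integrity profile to describe
 *
 * Description: Maps the checksum type and reference tag capability of
 * @bi to the legacy profile names exposed via sysfs; "nop" is returned
 * when no checksum is in use.
 */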
const char *blk_integrity_profile_name(struct blk_integrity *bi)
{
	switch (bi->csum_type) {
	case BLK_INTEGRITY_CSUM_IP:
		if (bi->flags & BLK_INTEGRITY_REF_TAG)
			return "T10-DIF-TYPE1-IP";
		return "T10-DIF-TYPE3-IP";
	case BLK_INTEGRITY_CSUM_CRC:
		if (bi->flags & BLK_INTEGRITY_REF_TAG)
			return "T10-DIF-TYPE1-CRC";
		return "T10-DIF-TYPE3-CRC";
	case BLK_INTEGRITY_CSUM_CRC64:
		if (bi->flags & BLK_INTEGRITY_REF_TAG)
			return "EXT-DIF-TYPE1-CRC64";
		return "EXT-DIF-TYPE3-CRC64";
	case BLK_INTEGRITY_CSUM_NONE:
		break;
	}

	return "nop";
}
EXPORT_SYMBOL_GPL(blk_integrity_profile_name);

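/*
 * Common store helper for the boolean integrity attributes: parse the
 * user value and update @flag under a frozen queue-limits update.
 */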
static ssize_t flag_store(struct device *dev, const char *page, size_t count,
		unsigned char flag)
{
	struct request_queue *q = dev_to_disk(dev)->queue;
	struct queue_limits lim;
	unsigned long val;
	int err;

	err = kstrtoul(page, 10, &val);
	if (err)
		return err;

	/* note that the flags are inverted vs the values in the sysfs files */
	lim = queue_limits_start_update(q);
	if (val)
		lim.integrity.flags &= ~flag;
	else
		lim.integrity.flags |= flag;

	err = queue_limits_commit_update_frozen(q, &lim);
	if (err)
		return err;
	return count;
}

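/* common show helper; reports the inverse of @flag to match flag_store() */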
static ssize_t flag_show(struct device *dev, char *page, unsigned char flag)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%d\n", !(bi->flags & flag));
}

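/* show/store handlers backing the individual integrity attributes */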
static ssize_t format_show(struct device *dev, struct device_attribute *attr,
			   char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	if (!bi->metadata_size)
		return sysfs_emit(page, "none\n");
	return sysfs_emit(page, "%s\n", blk_integrity_profile_name(bi));
}

static ssize_t tag_size_show(struct device *dev, struct device_attribute *attr,
			     char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%u\n", bi->tag_size);
}

static ssize_t protection_interval_bytes_show(struct device *dev,
					      struct device_attribute *attr,
					      char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%u\n",
			  bi->interval_exp ? 1 << bi->interval_exp : 0);
}

static ssize_t read_verify_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *page, size_t count)
{
	return flag_store(dev, page, count, BLK_INTEGRITY_NOVERIFY);
}

static ssize_t read_verify_show(struct device *dev,
				struct device_attribute *attr, char *page)
{
	return flag_show(dev, page, BLK_INTEGRITY_NOVERIFY);
}

static ssize_t write_generate_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *page, size_t count)
{
	return flag_store(dev, page, count, BLK_INTEGRITY_NOGENERATE);
}

static ssize_t write_generate_show(struct device *dev,
				   struct device_attribute *attr, char *page)
{
	return flag_show(dev, page, BLK_INTEGRITY_NOGENERATE);
}

static ssize_t device_is_integrity_capable_show(struct device *dev,
						struct device_attribute *attr,
						char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%u\n",
			  !!(bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE));
}

static DEVICE_ATTR_RO(format);
static DEVICE_ATTR_RO(tag_size);
static DEVICE_ATTR_RO(protection_interval_bytes);
static DEVICE_ATTR_RW(read_verify);
static DEVICE_ATTR_RW(write_generate);
static DEVICE_ATTR_RO(device_is_integrity_capable);

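/*
 * These attributes appear under /sys/block/<disk>/integrity/ once the
 * group below is registered against the disk.
 */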
static struct attribute *integrity_attrs[] = {
	&dev_attr_format.attr,
	&dev_attr_tag_size.attr,
	&dev_attr_protection_interval_bytes.attr,
	&dev_attr_read_verify.attr,
	&dev_attr_write_generate.attr,
	&dev_attr_device_is_integrity_capable.attr,
	NULL
};

const struct attribute_group blk_integrity_attr_group = {
	.name = "integrity",
	.attrs = integrity_attrs,
};