xref: /linux/block/blk-integrity.c (revision bf52ca5912c07664276c7b94db820fa2d638b681)
// SPDX-License-Identifier: GPL-2.0
/*
 * blk-integrity.c - Block layer data integrity extensions
 *
 * Copyright (C) 2007, 2008 Oracle Corporation
 * Written by: Martin K. Petersen <martin.petersen@oracle.com>
 */

#include <linux/blk-integrity.h>
#include <linux/backing-dev.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/scatterlist.h>
#include <linux/export.h>
#include <linux/slab.h>

#include "blk.h"
/**
 * blk_rq_count_integrity_sg - Count number of integrity scatterlist elements
 * @q:		request queue
 * @bio:	bio with integrity metadata attached
 *
 * Description: Returns the number of elements required in a
 * scatterlist corresponding to the integrity metadata in a bio.
 */
int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
{
	struct bio_vec iv, ivprv = { NULL };
	unsigned int segments = 0;
	unsigned int seg_size = 0;
	struct bvec_iter iter;
	int prev = 0;

	bio_for_each_integrity_vec(iv, bio, iter) {

		if (prev) {
			if (!biovec_phys_mergeable(q, &ivprv, &iv))
				goto new_segment;
			if (seg_size + iv.bv_len > queue_max_segment_size(q))
				goto new_segment;

			seg_size += iv.bv_len;
		} else {
new_segment:
			segments++;
			seg_size = iv.bv_len;
		}

		prev = 1;
		ivprv = iv;
	}

	return segments;
}
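
/*
 * Illustrative sketch, not part of this file: a caller sizing a
 * scatterlist for a bio's integrity metadata before mapping it.  The
 * names "sgl" and "nr_segs" are hypothetical.
 *
 *	struct scatterlist *sgl;
 *	int nr_segs;
 *
 *	nr_segs = blk_rq_count_integrity_sg(q, bio);
 *	sgl = kmalloc_array(nr_segs, sizeof(*sgl), GFP_KERNEL);
 *	if (sgl)
 *		sg_init_table(sgl, nr_segs);
 */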

/**
 * blk_rq_map_integrity_sg - Map integrity metadata into a scatterlist
 * @rq:		request with integrity metadata attached
 * @sglist:	target scatterlist
 *
 * Description: Map the integrity vectors in the request into a
 * scatterlist.  The scatterlist must be big enough to hold all
 * elements, i.e. sized using blk_rq_count_integrity_sg() or
 * rq->nr_integrity_segments.
 */
int blk_rq_map_integrity_sg(struct request *rq, struct scatterlist *sglist)
{
	struct bio_vec iv, ivprv = { NULL };
	struct request_queue *q = rq->q;
	struct scatterlist *sg = NULL;
	struct bio *bio = rq->bio;
	unsigned int segments = 0;
	struct bvec_iter iter;
	int prev = 0;

	bio_for_each_integrity_vec(iv, bio, iter) {
		if (prev) {
			if (!biovec_phys_mergeable(q, &ivprv, &iv))
				goto new_segment;
			if (sg->length + iv.bv_len > queue_max_segment_size(q))
				goto new_segment;

			sg->length += iv.bv_len;
		} else {
new_segment:
			if (!sg)
				sg = sglist;
			else {
				sg_unmark_end(sg);
				sg = sg_next(sg);
			}

			sg_set_page(sg, iv.bv_page, iv.bv_len, iv.bv_offset);
			segments++;
		}

		prev = 1;
		ivprv = iv;
	}

	if (sg)
		sg_mark_end(sg);

	/*
	 * Something must have gone wrong if the computed number of
	 * segments exceeds the number of the request's physical
	 * integrity segments.
	 */
	BUG_ON(segments > rq->nr_integrity_segments);
	BUG_ON(segments > queue_max_integrity_segments(q));
	return segments;
}
EXPORT_SYMBOL(blk_rq_map_integrity_sg);
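
/*
 * Illustrative sketch, not part of this file: a driver that DMA-maps a
 * request's integrity metadata would typically size the table from
 * rq->nr_integrity_segments and then call blk_rq_map_integrity_sg().
 * "isgl" and "count" are hypothetical names; real callers such as the
 * SCSI midlayer keep the table in per-command state.
 *
 *	struct scatterlist *isgl;
 *	int count;
 *
 *	isgl = kmalloc_array(rq->nr_integrity_segments, sizeof(*isgl),
 *			     GFP_ATOMIC);
 *	if (!isgl)
 *		return -ENOMEM;
 *	sg_init_table(isgl, rq->nr_integrity_segments);
 *	count = blk_rq_map_integrity_sg(rq, isgl);
 */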

int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
			      ssize_t bytes, u32 seed)
{
	int ret = bio_integrity_map_user(rq->bio, ubuf, bytes, seed);

	if (ret)
		return ret;

	rq->nr_integrity_segments = blk_rq_count_integrity_sg(rq->q, rq->bio);
	rq->cmd_flags |= REQ_INTEGRITY;
	return 0;
}
EXPORT_SYMBOL_GPL(blk_rq_integrity_map_user);
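
/*
 * Illustrative sketch, not part of this file: passthrough paths (e.g.
 * the NVMe ioctl code) attach a user space metadata buffer to an
 * already mapped request roughly like this; "meta_buf", "meta_len" and
 * "meta_seed" are simplified names.
 *
 *	ret = blk_rq_map_user(q, rq, NULL, ubuf, bufflen, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	if (meta_buf)
 *		ret = blk_rq_integrity_map_user(rq, meta_buf, meta_len,
 *						meta_seed);
 */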

bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
			    struct request *next)
{
	if (blk_integrity_rq(req) == 0 && blk_integrity_rq(next) == 0)
		return true;

	if (blk_integrity_rq(req) == 0 || blk_integrity_rq(next) == 0)
		return false;

	if (bio_integrity(req->bio)->bip_flags !=
	    bio_integrity(next->bio)->bip_flags)
		return false;

	if (req->nr_integrity_segments + next->nr_integrity_segments >
	    q->limits.max_integrity_segments)
		return false;

	if (integrity_req_gap_back_merge(req, next->bio))
		return false;

	return true;
}

bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
			     struct bio *bio)
{
	int nr_integrity_segs;

	if (blk_integrity_rq(req) == 0 && bio_integrity(bio) == NULL)
		return true;

	if (blk_integrity_rq(req) == 0 || bio_integrity(bio) == NULL)
		return false;

	if (bio_integrity(req->bio)->bip_flags != bio_integrity(bio)->bip_flags)
		return false;

	nr_integrity_segs = blk_rq_count_integrity_sg(q, bio);
	if (req->nr_integrity_segments + nr_integrity_segs >
	    q->limits.max_integrity_segments)
		return false;

	return true;
}
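
/*
 * Note (illustrative, not part of this file): both merge helpers encode
 * the same rules.  A merge is allowed only if both sides carry integrity
 * metadata or neither does, the bip_flags of the two payloads match, and
 * the combined segment count stays within max_integrity_segments
 * (blk_integrity_merge_rq() additionally rejects merges that would
 * create a segment gap).  The callers live in blk-merge.c, roughly:
 *
 *	if (!blk_integrity_merge_bio(req->q, req, bio))
 *		return false;
 */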

static inline struct blk_integrity *dev_to_bi(struct device *dev)
{
	return &dev_to_disk(dev)->queue->limits.integrity;
}

const char *blk_integrity_profile_name(struct blk_integrity *bi)
{
	switch (bi->csum_type) {
	case BLK_INTEGRITY_CSUM_IP:
		if (bi->flags & BLK_INTEGRITY_REF_TAG)
			return "T10-DIF-TYPE1-IP";
		return "T10-DIF-TYPE3-IP";
	case BLK_INTEGRITY_CSUM_CRC:
		if (bi->flags & BLK_INTEGRITY_REF_TAG)
			return "T10-DIF-TYPE1-CRC";
		return "T10-DIF-TYPE3-CRC";
	case BLK_INTEGRITY_CSUM_CRC64:
		if (bi->flags & BLK_INTEGRITY_REF_TAG)
			return "EXT-DIF-TYPE1-CRC64";
		return "EXT-DIF-TYPE3-CRC64";
	case BLK_INTEGRITY_CSUM_NONE:
		break;
	}

	return "nop";
}
EXPORT_SYMBOL_GPL(blk_integrity_profile_name);

static ssize_t flag_store(struct device *dev, const char *page, size_t count,
		unsigned char flag)
{
	struct request_queue *q = dev_to_disk(dev)->queue;
	struct queue_limits lim;
	unsigned long val;
	int err;

	err = kstrtoul(page, 10, &val);
	if (err)
		return err;

	/* note that the flags are inverted vs the values in the sysfs files */
	lim = queue_limits_start_update(q);
	if (val)
		lim.integrity.flags &= ~flag;
	else
		lim.integrity.flags |= flag;

	blk_mq_freeze_queue(q);
	err = queue_limits_commit_update(q, &lim);
	blk_mq_unfreeze_queue(q);
	if (err)
		return err;
	return count;
}
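
/*
 * Illustrative example, not part of this file: because of the inversion
 * above, writing 0 to a sysfs attribute sets the corresponding NO* flag
 * and writing 1 clears it.  From user space ("sda" is a placeholder):
 *
 *	echo 0 > /sys/block/sda/integrity/read_verify
 *
 * would set BLK_INTEGRITY_NOVERIFY and disable verification of the
 * protection information on reads.
 */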

static ssize_t flag_show(struct device *dev, char *page, unsigned char flag)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%d\n", !(bi->flags & flag));
}

static ssize_t format_show(struct device *dev, struct device_attribute *attr,
			   char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	if (!bi->tuple_size)
		return sysfs_emit(page, "none\n");
	return sysfs_emit(page, "%s\n", blk_integrity_profile_name(bi));
}

static ssize_t tag_size_show(struct device *dev, struct device_attribute *attr,
			     char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%u\n", bi->tag_size);
}

static ssize_t protection_interval_bytes_show(struct device *dev,
					      struct device_attribute *attr,
					      char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%u\n",
			  bi->interval_exp ? 1 << bi->interval_exp : 0);
}

static ssize_t read_verify_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *page, size_t count)
{
	return flag_store(dev, page, count, BLK_INTEGRITY_NOVERIFY);
}

static ssize_t read_verify_show(struct device *dev,
				struct device_attribute *attr, char *page)
{
	return flag_show(dev, page, BLK_INTEGRITY_NOVERIFY);
}

static ssize_t write_generate_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *page, size_t count)
{
	return flag_store(dev, page, count, BLK_INTEGRITY_NOGENERATE);
}

static ssize_t write_generate_show(struct device *dev,
				   struct device_attribute *attr, char *page)
{
	return flag_show(dev, page, BLK_INTEGRITY_NOGENERATE);
}

static ssize_t device_is_integrity_capable_show(struct device *dev,
						struct device_attribute *attr,
						char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%u\n",
			  !!(bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE));
}

static DEVICE_ATTR_RO(format);
static DEVICE_ATTR_RO(tag_size);
static DEVICE_ATTR_RO(protection_interval_bytes);
static DEVICE_ATTR_RW(read_verify);
static DEVICE_ATTR_RW(write_generate);
static DEVICE_ATTR_RO(device_is_integrity_capable);

static struct attribute *integrity_attrs[] = {
	&dev_attr_format.attr,
	&dev_attr_tag_size.attr,
	&dev_attr_protection_interval_bytes.attr,
	&dev_attr_read_verify.attr,
	&dev_attr_write_generate.attr,
	&dev_attr_device_is_integrity_capable.attr,
	NULL
};

const struct attribute_group blk_integrity_attr_group = {
	.name = "integrity",
	.attrs = integrity_attrs,
};
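
/*
 * Illustrative note, not part of this file: for a disk named "sda" the
 * group above appears in sysfs as:
 *
 *	/sys/block/sda/integrity/format
 *	/sys/block/sda/integrity/tag_size
 *	/sys/block/sda/integrity/protection_interval_bytes
 *	/sys/block/sda/integrity/read_verify
 *	/sys/block/sda/integrity/write_generate
 *	/sys/block/sda/integrity/device_is_integrity_capable
 */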