// SPDX-License-Identifier: GPL-2.0
/*
 * blk-integrity.c - Block layer data integrity extensions
 *
 * Copyright (C) 2007, 2008 Oracle Corporation
 * Written by: Martin K. Petersen <martin.petersen@oracle.com>
 */

#include <linux/blk-integrity.h>
#include <linux/backing-dev.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/scatterlist.h>
#include <linux/export.h>
#include <linux/slab.h>

#include "blk.h"

/**
 * blk_rq_count_integrity_sg - Count number of integrity scatterlist elements
 * @q:		request queue
 * @bio:	bio with integrity metadata attached
 *
 * Description: Returns the number of elements required in a
 * scatterlist corresponding to the integrity metadata in a bio.
 */
int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
{
	struct bio_vec iv, ivprv = { NULL };
	unsigned int segments = 0;
	unsigned int seg_size = 0;
	struct bvec_iter iter;
	int prev = 0;

	bio_for_each_integrity_vec(iv, bio, iter) {

		if (prev) {
			if (!biovec_phys_mergeable(q, &ivprv, &iv))
				goto new_segment;
			if (seg_size + iv.bv_len > queue_max_segment_size(q))
				goto new_segment;

			seg_size += iv.bv_len;
		} else {
new_segment:
			segments++;
			seg_size = iv.bv_len;
		}

		prev = 1;
		ivprv = iv;
	}

	return segments;
}
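
/*
 * Example (a minimal sketch, not taken from an in-tree caller): sizing an
 * integrity scatterlist before handing it to blk_rq_map_integrity_sg()
 * below:
 *
 *	int nr = blk_rq_count_integrity_sg(q, rq->bio);
 *	struct scatterlist *sgl;
 *
 *	sgl = kmalloc_array(nr, sizeof(*sgl), GFP_KERNEL);
 *	if (!sgl)
 *		return -ENOMEM;
 *	sg_init_table(sgl, nr);
 *	blk_rq_map_integrity_sg(rq, sgl);
 */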

/**
 * blk_rq_map_integrity_sg - Map integrity metadata into a scatterlist
 * @rq:		request to map
 * @sglist:	target scatterlist
 *
 * Description: Map the integrity vectors in the request into a
 * scatterlist.  The scatterlist must be big enough to hold all
 * elements, i.e. sized using blk_rq_count_integrity_sg() or
 * rq->nr_integrity_segments.
 */
int blk_rq_map_integrity_sg(struct request *rq, struct scatterlist *sglist)
{
	struct bio_vec iv, ivprv = { NULL };
	struct request_queue *q = rq->q;
	struct scatterlist *sg = NULL;
	struct bio *bio = rq->bio;
	unsigned int segments = 0;
	struct bvec_iter iter;
	int prev = 0;

	bio_for_each_integrity_vec(iv, bio, iter) {
		if (prev) {
			if (!biovec_phys_mergeable(q, &ivprv, &iv))
				goto new_segment;
			if (sg->length + iv.bv_len > queue_max_segment_size(q))
				goto new_segment;

			sg->length += iv.bv_len;
		} else {
new_segment:
			if (!sg)
				sg = sglist;
			else {
				sg_unmark_end(sg);
				sg = sg_next(sg);
			}

			sg_set_page(sg, iv.bv_page, iv.bv_len, iv.bv_offset);
			segments++;
		}

		prev = 1;
		ivprv = iv;
	}

	if (sg)
		sg_mark_end(sg);

	/*
	 * Something must have gone wrong if the computed number of
	 * segments is bigger than the request's number of physical
	 * integrity segments.
	 */
	BUG_ON(segments > rq->nr_integrity_segments);
	BUG_ON(segments > queue_max_integrity_segments(q));
	return segments;
}
EXPORT_SYMBOL(blk_rq_map_integrity_sg);

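/*
 * Example (a minimal sketch of a midlayer consumer, not taken from an
 * in-tree driver; prot_sgl is an illustrative scatterlist preallocated
 * with at least queue_max_integrity_segments() entries):
 *
 *	unsigned int nr_mapped = blk_rq_map_integrity_sg(rq, prot_sgl);
 *
 * The first nr_mapped entries of prot_sgl can then be handed to the
 * hardware for protection-information DMA.
 */

/**
 * blk_rq_integrity_map_user - Map user-space integrity metadata into a request
 * @rq:		request to attach the integrity payload to
 * @ubuf:	user buffer holding the integrity metadata
 * @bytes:	size of @ubuf in bytes
 *
 * Description: Map the user buffer at @ubuf as the integrity payload of
 * @rq's bio, account the resulting number of integrity segments and mark
 * the request with REQ_INTEGRITY.  Returns zero on success or a negative
 * errno on failure.
 */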
int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
			      ssize_t bytes)
{
	int ret;
	struct iov_iter iter;
	unsigned int direction;

	if (op_is_write(req_op(rq)))
		direction = ITER_DEST;
	else
		direction = ITER_SOURCE;
	iov_iter_ubuf(&iter, direction, ubuf, bytes);
	ret = bio_integrity_map_user(rq->bio, &iter);
	if (ret)
		return ret;

	rq->nr_integrity_segments = blk_rq_count_integrity_sg(rq->q, rq->bio);
	rq->cmd_flags |= REQ_INTEGRITY;
	return 0;
}
EXPORT_SYMBOL_GPL(blk_rq_integrity_map_user);

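/*
 * Example (a minimal sketch of a passthrough setup path; meta_buf and
 * meta_len are illustrative names for a user pointer and length carried
 * in a driver-specific ioctl):
 *
 *	if (meta_buf && meta_len) {
 *		ret = blk_rq_integrity_map_user(rq,
 *				(void __user *)meta_buf, meta_len);
 *		if (ret)
 *			goto out_unmap;
 *	}
 */

/*
 * Check whether @req and @next can be merged as far as integrity metadata
 * is concerned: either both or neither request must carry an integrity
 * payload, the payloads' bip_flags must match, the combined segment count
 * must stay within the queue limit, and the virtual-boundary gap
 * constraint must hold across the merge point.
 */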
bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
			    struct request *next)
{
	if (blk_integrity_rq(req) == 0 && blk_integrity_rq(next) == 0)
		return true;

	if (blk_integrity_rq(req) == 0 || blk_integrity_rq(next) == 0)
		return false;

	if (bio_integrity(req->bio)->bip_flags !=
	    bio_integrity(next->bio)->bip_flags)
		return false;

	if (req->nr_integrity_segments + next->nr_integrity_segments >
	    q->limits.max_integrity_segments)
		return false;

	if (integrity_req_gap_back_merge(req, next->bio))
		return false;

	return true;
}

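/*
 * As above, but for merging a bio into an existing request.  The bio's
 * integrity segments have to be counted here because a bio, unlike a
 * request, does not carry a precomputed nr_integrity_segments.
 */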
bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
			     struct bio *bio)
{
	int nr_integrity_segs;

	if (blk_integrity_rq(req) == 0 && bio_integrity(bio) == NULL)
		return true;

	if (blk_integrity_rq(req) == 0 || bio_integrity(bio) == NULL)
		return false;

	if (bio_integrity(req->bio)->bip_flags != bio_integrity(bio)->bip_flags)
		return false;

	nr_integrity_segs = blk_rq_count_integrity_sg(q, bio);
	if (req->nr_integrity_segments + nr_integrity_segs >
	    q->limits.max_integrity_segments)
		return false;

	return true;
}

static inline struct blk_integrity *dev_to_bi(struct device *dev)
{
	return &dev_to_disk(dev)->queue->limits.integrity;
}

const char *blk_integrity_profile_name(struct blk_integrity *bi)
{
	switch (bi->csum_type) {
	case BLK_INTEGRITY_CSUM_IP:
		if (bi->flags & BLK_INTEGRITY_REF_TAG)
			return "T10-DIF-TYPE1-IP";
		return "T10-DIF-TYPE3-IP";
	case BLK_INTEGRITY_CSUM_CRC:
		if (bi->flags & BLK_INTEGRITY_REF_TAG)
			return "T10-DIF-TYPE1-CRC";
		return "T10-DIF-TYPE3-CRC";
	case BLK_INTEGRITY_CSUM_CRC64:
		if (bi->flags & BLK_INTEGRITY_REF_TAG)
			return "EXT-DIF-TYPE1-CRC64";
		return "EXT-DIF-TYPE3-CRC64";
	case BLK_INTEGRITY_CSUM_NONE:
		break;
	}

	return "nop";
}
EXPORT_SYMBOL_GPL(blk_integrity_profile_name);
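
/*
 * The returned name is what the "format" sysfs attribute below reports;
 * e.g. a disk formatted with Type 1 protection and a T10 CRC shows
 * ("sda" being just an example device):
 *
 *	$ cat /sys/block/sda/integrity/format
 *	T10-DIF-TYPE1-CRC
 */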

static ssize_t flag_store(struct device *dev, const char *page, size_t count,
		unsigned char flag)
{
	struct request_queue *q = dev_to_disk(dev)->queue;
	struct queue_limits lim;
	unsigned long val;
	int err;

	err = kstrtoul(page, 10, &val);
	if (err)
		return err;

	/* note that the flags are inverted vs the values in the sysfs files */
	lim = queue_limits_start_update(q);
	if (val)
		lim.integrity.flags &= ~flag;
	else
		lim.integrity.flags |= flag;

	err = queue_limits_commit_update_frozen(q, &lim);
	if (err)
		return err;
	return count;
}
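
/*
 * Example of the inversion (a sketch; "sdX" is illustrative): the flags
 * are negative ("NOVERIFY", "NOGENERATE") while the sysfs files are
 * positive, so writing 0 to read_verify sets BLK_INTEGRITY_NOVERIFY and
 * writing a non-zero value clears it:
 *
 *	$ echo 0 > /sys/block/sdX/integrity/read_verify   # disable verification
 *	$ echo 1 > /sys/block/sdX/integrity/read_verify   # re-enable it
 */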

static ssize_t flag_show(struct device *dev, char *page, unsigned char flag)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%d\n", !(bi->flags & flag));
}

static ssize_t format_show(struct device *dev, struct device_attribute *attr,
			   char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	if (!bi->tuple_size)
		return sysfs_emit(page, "none\n");
	return sysfs_emit(page, "%s\n", blk_integrity_profile_name(bi));
}

static ssize_t tag_size_show(struct device *dev, struct device_attribute *attr,
			     char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%u\n", bi->tag_size);
}

static ssize_t protection_interval_bytes_show(struct device *dev,
					      struct device_attribute *attr,
					      char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%u\n",
			  bi->interval_exp ? 1 << bi->interval_exp : 0);
}
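
/*
 * The protection interval is 2^interval_exp bytes: a disk using 512-byte
 * protection intervals has interval_exp == 9 and thus reads back as
 * 1 << 9 == 512, while a device without integrity support reports 0.
 */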

static ssize_t read_verify_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *page, size_t count)
{
	return flag_store(dev, page, count, BLK_INTEGRITY_NOVERIFY);
}

static ssize_t read_verify_show(struct device *dev,
				struct device_attribute *attr, char *page)
{
	return flag_show(dev, page, BLK_INTEGRITY_NOVERIFY);
}

static ssize_t write_generate_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *page, size_t count)
{
	return flag_store(dev, page, count, BLK_INTEGRITY_NOGENERATE);
}

static ssize_t write_generate_show(struct device *dev,
				   struct device_attribute *attr, char *page)
{
	return flag_show(dev, page, BLK_INTEGRITY_NOGENERATE);
}

static ssize_t device_is_integrity_capable_show(struct device *dev,
						struct device_attribute *attr,
						char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%u\n",
			  !!(bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE));
}
static DEVICE_ATTR_RO(format);
static DEVICE_ATTR_RO(tag_size);
static DEVICE_ATTR_RO(protection_interval_bytes);
static DEVICE_ATTR_RW(read_verify);
static DEVICE_ATTR_RW(write_generate);
static DEVICE_ATTR_RO(device_is_integrity_capable);

static struct attribute *integrity_attrs[] = {
	&dev_attr_format.attr,
	&dev_attr_tag_size.attr,
	&dev_attr_protection_interval_bytes.attr,
	&dev_attr_read_verify.attr,
	&dev_attr_write_generate.attr,
	&dev_attr_device_is_integrity_capable.attr,
	NULL
};

const struct attribute_group blk_integrity_attr_group = {
	.name = "integrity",
	.attrs = integrity_attrs,
};
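
/*
 * With this group registered on the disk's device, an integrity-capable
 * disk ends up with the following sysfs layout ("sda" as an example):
 *
 *	/sys/block/sda/integrity/format
 *	/sys/block/sda/integrity/tag_size
 *	/sys/block/sda/integrity/protection_interval_bytes
 *	/sys/block/sda/integrity/read_verify
 *	/sys/block/sda/integrity/write_generate
 *	/sys/block/sda/integrity/device_is_integrity_capable
 */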
327