// SPDX-License-Identifier: GPL-2.0
/*
 * blk-integrity.c - Block layer data integrity extensions
 *
 * Copyright (C) 2007, 2008 Oracle Corporation
 * Written by: Martin K. Petersen <martin.petersen@oracle.com>
 */

#include <linux/blk-integrity.h>
#include <linux/backing-dev.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/scatterlist.h>
#include <linux/export.h>
#include <linux/slab.h>

#include "blk.h"

/**
 * blk_rq_count_integrity_sg - Count number of integrity scatterlist elements
 * @q:		request queue
 * @bio:	bio with integrity metadata attached
 *
 * Description: Returns the number of elements required in a
 * scatterlist corresponding to the integrity metadata in a bio.
 */
int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
{
	struct bio_vec iv, ivprv = { NULL };
	unsigned int segments = 0;
	unsigned int seg_size = 0;
	struct bvec_iter iter;
	int prev = 0;

	bio_for_each_integrity_vec(iv, bio, iter) {

		if (prev) {
			if (!biovec_phys_mergeable(q, &ivprv, &iv))
				goto new_segment;
			if (seg_size + iv.bv_len > queue_max_segment_size(q))
				goto new_segment;

			seg_size += iv.bv_len;
		} else {
new_segment:
			segments++;
			seg_size = iv.bv_len;
		}

		prev = 1;
		ivprv = iv;
	}

	return segments;
}
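
/*
 * Example (illustration only, not part of this file): a minimal sketch
 * of how a submitter might size and allocate an integrity scatterlist
 * with blk_rq_count_integrity_sg().  The helper name prep_integrity_sg()
 * and the GFP flags are assumptions made for the sketch.
 */
#if 0
static struct scatterlist *prep_integrity_sg(struct request *rq)
{
	int nr = blk_rq_count_integrity_sg(rq->q, rq->bio);
	struct scatterlist *sgl;

	if (!nr)
		return NULL;

	sgl = kcalloc(nr, sizeof(*sgl), GFP_KERNEL);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, nr);	/* zero the table and mark the last entry */
	return sgl;
}
#endif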

/**
 * blk_rq_map_integrity_sg - Map integrity metadata into a scatterlist
 * @rq:		request to map
 * @sglist:	target scatterlist
 *
 * Description: Map the integrity vectors in a request into a
 * scatterlist.  The scatterlist must be big enough to hold all
 * elements, i.e. sized using blk_rq_count_integrity_sg() or
 * rq->nr_integrity_segments.
 */
int blk_rq_map_integrity_sg(struct request *rq, struct scatterlist *sglist)
{
	struct bio_vec iv, ivprv = { NULL };
	struct request_queue *q = rq->q;
	struct scatterlist *sg = NULL;
	struct bio *bio = rq->bio;
	unsigned int segments = 0;
	struct bvec_iter iter;
	int prev = 0;

	bio_for_each_integrity_vec(iv, bio, iter) {
		if (prev) {
			if (!biovec_phys_mergeable(q, &ivprv, &iv))
				goto new_segment;
			if (sg->length + iv.bv_len > queue_max_segment_size(q))
				goto new_segment;

			sg->length += iv.bv_len;
		} else {
new_segment:
			if (!sg)
				sg = sglist;
			else {
				sg_unmark_end(sg);
				sg = sg_next(sg);
			}

			sg_set_page(sg, iv.bv_page, iv.bv_len, iv.bv_offset);
			segments++;
		}

		prev = 1;
		ivprv = iv;
	}

	if (sg)
		sg_mark_end(sg);

	/*
	 * Something is wrong if the computed number of segments is
	 * bigger than the number of physical integrity segments in the
	 * request.
	 */
	BUG_ON(segments > rq->nr_integrity_segments);
	BUG_ON(segments > queue_max_integrity_segments(q));
	return segments;
}
EXPORT_SYMBOL(blk_rq_map_integrity_sg);
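
/*
 * Example (illustration only, not part of this file): a minimal sketch
 * of mapping a request's integrity metadata for DMA.  It reuses the
 * hypothetical prep_integrity_sg() helper sketched above and assumes
 * <linux/dma-mapping.h>; error unwinding is elided.
 */
#if 0
static int map_integrity_for_dma(struct device *dma_dev, struct request *rq)
{
	struct scatterlist *sgl = prep_integrity_sg(rq);
	int nents;

	if (!sgl)
		return -ENOMEM;

	/* Coalesce the integrity bvecs, then hand them to the DMA layer. */
	nents = blk_rq_map_integrity_sg(rq, sgl);
	nents = dma_map_sg(dma_dev, sgl, nents, rq_dma_dir(rq));
	if (!nents) {
		kfree(sgl);
		return -EIO;
	}
	return 0;
}
#endif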
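/**
 * blk_rq_integrity_map_user - Map user space integrity metadata into a request
 * @rq:		request to attach the integrity payload to
 * @ubuf:	user buffer containing the integrity metadata
 * @bytes:	size of the user buffer
 * @seed:	protection information seed
 *
 * Description: Map @ubuf as the integrity payload of @rq, record the
 * number of integrity segments and mark the request as carrying
 * integrity metadata.  Returns 0 on success or a negative errno on
 * failure.
 */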
int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf,
			      ssize_t bytes, u32 seed)
{
	int ret = bio_integrity_map_user(rq->bio, ubuf, bytes, seed);

	if (ret)
		return ret;

	rq->nr_integrity_segments = blk_rq_count_integrity_sg(rq->q, rq->bio);
	rq->cmd_flags |= REQ_INTEGRITY;
	return 0;
}
EXPORT_SYMBOL_GPL(blk_rq_integrity_map_user);
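
/*
 * Example (illustration only, not part of this file): a fragment of how
 * a passthrough submitter might attach user integrity metadata after
 * mapping the data buffer.  meta_ubuf, meta_len and seed are assumed to
 * come from the caller's ioctl arguments.
 */
#if 0
	ret = blk_rq_integrity_map_user(rq, meta_ubuf, meta_len, seed);
	if (ret)
		goto out_unmap_data;
#endif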
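/**
 * blk_integrity_merge_rq - Check if two requests can merge integrity-wise
 * @q:		request queue
 * @req:	request to merge into
 * @next:	request being merged
 *
 * Description: Merging is allowed when neither request carries
 * integrity metadata, or when both do and their bip flags match, the
 * combined segment count stays within the queue limit and no virtual
 * boundary gap is violated.
 */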
bool blk_integrity_merge_rq(struct request_queue *q, struct request *req,
			    struct request *next)
{
	if (blk_integrity_rq(req) == 0 && blk_integrity_rq(next) == 0)
		return true;

	if (blk_integrity_rq(req) == 0 || blk_integrity_rq(next) == 0)
		return false;

	if (bio_integrity(req->bio)->bip_flags !=
	    bio_integrity(next->bio)->bip_flags)
		return false;

	if (req->nr_integrity_segments + next->nr_integrity_segments >
	    q->limits.max_integrity_segments)
		return false;

	if (integrity_req_gap_back_merge(req, next->bio))
		return false;

	return true;
}

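/**
 * blk_integrity_merge_bio - Check if a bio can merge into a request
 * @q:		request queue
 * @req:	request to merge into
 * @bio:	bio being merged
 *
 * Description: Same idea as blk_integrity_merge_rq(), except the
 * integrity segments of @bio are recounted and checked against the
 * queue limit together with the segments already in @req.
 */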
bool blk_integrity_merge_bio(struct request_queue *q, struct request *req,
			     struct bio *bio)
{
	int nr_integrity_segs;

	if (blk_integrity_rq(req) == 0 && bio_integrity(bio) == NULL)
		return true;

	if (blk_integrity_rq(req) == 0 || bio_integrity(bio) == NULL)
		return false;

	if (bio_integrity(req->bio)->bip_flags != bio_integrity(bio)->bip_flags)
		return false;

	nr_integrity_segs = blk_rq_count_integrity_sg(q, bio);
	if (req->nr_integrity_segments + nr_integrity_segs >
	    q->limits.max_integrity_segments)
		return false;

	return true;
}

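/* The integrity profile is stored in the queue limits of the disk's queue. */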
static inline struct blk_integrity *dev_to_bi(struct device *dev)
{
	return &dev_to_disk(dev)->queue->limits.integrity;
}

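/**
 * blk_integrity_profile_name - Return the name of an integrity profile
 * @bi:		blk_integrity profile
 *
 * Description: Maps the checksum type and the reference tag flag to
 * the well-known T10 DIF profile names, or "nop" when no checksum is
 * in use.
 */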
const char *blk_integrity_profile_name(struct blk_integrity *bi)
{
	switch (bi->csum_type) {
	case BLK_INTEGRITY_CSUM_IP:
		if (bi->flags & BLK_INTEGRITY_REF_TAG)
			return "T10-DIF-TYPE1-IP";
		return "T10-DIF-TYPE3-IP";
	case BLK_INTEGRITY_CSUM_CRC:
		if (bi->flags & BLK_INTEGRITY_REF_TAG)
			return "T10-DIF-TYPE1-CRC";
		return "T10-DIF-TYPE3-CRC";
	case BLK_INTEGRITY_CSUM_CRC64:
		if (bi->flags & BLK_INTEGRITY_REF_TAG)
			return "EXT-DIF-TYPE1-CRC64";
		return "EXT-DIF-TYPE3-CRC64";
	case BLK_INTEGRITY_CSUM_NONE:
		break;
	}

	return "nop";
}
EXPORT_SYMBOL_GPL(blk_integrity_profile_name);

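/*
 * Shared store helper for the read_verify and write_generate sysfs
 * attributes.  The stored flags are negative (NOVERIFY/NOGENERATE),
 * so writing 1 to a file clears the corresponding flag and writing 0
 * sets it.
 */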
static ssize_t flag_store(struct device *dev, const char *page, size_t count,
			  unsigned char flag)
{
	struct request_queue *q = dev_to_disk(dev)->queue;
	struct queue_limits lim;
	unsigned long val;
	int err;

	err = kstrtoul(page, 10, &val);
	if (err)
		return err;

	/* note that the flags are inverted vs the values in the sysfs files */
	lim = queue_limits_start_update(q);
	if (val)
		lim.integrity.flags &= ~flag;
	else
		lim.integrity.flags |= flag;

	blk_mq_freeze_queue(q);
	err = queue_limits_commit_update(q, &lim);
	blk_mq_unfreeze_queue(q);
	if (err)
		return err;
	return count;
}

static ssize_t flag_show(struct device *dev, char *page, unsigned char flag)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%d\n", !(bi->flags & flag));
}

static ssize_t format_show(struct device *dev, struct device_attribute *attr,
			   char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	if (!bi->tuple_size)
		return sysfs_emit(page, "none\n");
	return sysfs_emit(page, "%s\n", blk_integrity_profile_name(bi));
}

static ssize_t tag_size_show(struct device *dev, struct device_attribute *attr,
			     char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%u\n", bi->tag_size);
}

static ssize_t protection_interval_bytes_show(struct device *dev,
					      struct device_attribute *attr,
					      char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%u\n",
			  bi->interval_exp ? 1 << bi->interval_exp : 0);
}

static ssize_t read_verify_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *page, size_t count)
{
	return flag_store(dev, page, count, BLK_INTEGRITY_NOVERIFY);
}

static ssize_t read_verify_show(struct device *dev,
				struct device_attribute *attr, char *page)
{
	return flag_show(dev, page, BLK_INTEGRITY_NOVERIFY);
}

static ssize_t write_generate_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *page, size_t count)
{
	return flag_store(dev, page, count, BLK_INTEGRITY_NOGENERATE);
}

static ssize_t write_generate_show(struct device *dev,
				   struct device_attribute *attr, char *page)
{
	return flag_show(dev, page, BLK_INTEGRITY_NOGENERATE);
}

static ssize_t device_is_integrity_capable_show(struct device *dev,
						struct device_attribute *attr,
						char *page)
{
	struct blk_integrity *bi = dev_to_bi(dev);

	return sysfs_emit(page, "%u\n",
			  !!(bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE));
}

static DEVICE_ATTR_RO(format);
static DEVICE_ATTR_RO(tag_size);
static DEVICE_ATTR_RO(protection_interval_bytes);
static DEVICE_ATTR_RW(read_verify);
static DEVICE_ATTR_RW(write_generate);
static DEVICE_ATTR_RO(device_is_integrity_capable);

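/*
 * These attributes are exposed in the "integrity" group of the disk's
 * sysfs directory, i.e. /sys/block/<disk>/integrity/.
 */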
static struct attribute *integrity_attrs[] = {
	&dev_attr_format.attr,
	&dev_attr_tag_size.attr,
	&dev_attr_protection_interval_bytes.attr,
	&dev_attr_read_verify.attr,
	&dev_attr_write_generate.attr,
	&dev_attr_device_is_integrity_capable.attr,
	NULL
};

const struct attribute_group blk_integrity_attr_group = {
	.name = "integrity",
	.attrs = integrity_attrs,
};