xref: /linux/block/blk-map.c (revision 6ea76f33e9ab99c7888547e1acba2baf8e4b5b17)
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

/*
 * Append a bio to a passthrough request.  Only works if the bio can be
 * merged into the request based on the driver constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
	if (!rq->bio) {
		rq->cmd_flags &= REQ_OP_MASK;
		rq->cmd_flags |= (bio->bi_opf & REQ_OP_MASK);
		blk_rq_bio_prep(rq->q, rq, bio);
	} else {
		if (!ll_back_merge_fn(rq->q, rq, bio))
			return -EINVAL;

		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->__data_len += bio->bi_iter.bi_size;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);
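
/*
 * Illustrative sketch, not part of this file: how a caller that builds its
 * own bio (here via bio_map_kern() over a kernel buffer, purely as an
 * example) might attach it to a passthrough request with
 * blk_rq_append_bio().  The function name and buffer handling are
 * assumptions made for the example; a real caller may also need
 * blk_queue_bounce() afterwards, as blk_rq_map_kern() does.
 */
static int example_append_kernel_bio(struct request *rq, void *buf,
				     unsigned int len, gfp_t gfp_mask)
{
	struct bio *bio;
	int ret;

	bio = bio_map_kern(rq->q, buf, len, gfp_mask);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/*
	 * Either starts the request's bio chain, or merges onto its tail
	 * subject to the queue's merge constraints.
	 */
	ret = blk_rq_append_bio(rq, bio);
	if (ret)
		bio_put(bio);
	return ret;
}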

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user_iov(struct request *rq,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask, bool copy)
{
	struct request_queue *q = rq->q;
	struct bio *bio, *orig_bio;
	int ret;

	if (copy)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio_set_flag(bio, BIO_NULL_MAPPED);

	iov_iter_advance(iter, bio->bi_iter.bi_size);
	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later so we have to get a ref to prevent it from being freed
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(rq, bio);
	if (ret) {
		bio_endio(bio);
		__blk_rq_unmap_user(orig_bio);
		bio_put(bio);
		return ret;
	}

	return 0;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret;

	if (map_data)
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->rq_flags |= RQF_COPY_USER;
	return 0;

unmap_rq:
	__blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return -EINVAL;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
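
/*
 * Illustrative sketch, not part of this file: a passthrough-style caller
 * mapping a user iovec onto an already prepared request, in the style of
 * sg_io().  The function name and the UIO_FASTIOV stack array are
 * assumptions for the example; execution and unmapping are assumed to
 * happen elsewhere, and kfree() needs <linux/slab.h>.
 */
static int example_map_user_iovec(struct request *rq,
				  const struct iovec __user *uvec,
				  unsigned int nr_segs, struct bio **orig_bio)
{
	struct iovec fast_iov[UIO_FASTIOV];
	struct iovec *iov = fast_iov;
	struct iov_iter iter;
	ssize_t bytes;
	int ret;

	bytes = import_iovec(rq_data_dir(rq), uvec, nr_segs,
			     ARRAY_SIZE(fast_iov), &iov, &iter);
	if (bytes < 0)
		return bytes;

	ret = blk_rq_map_user_iov(rq->q, rq, NULL, &iter, GFP_KERNEL);
	if (!ret) {
		/* Save the original bio for blk_rq_unmap_user() later. */
		*orig_bio = rq->bio;
	}
	kfree(iov);	/* import_iovec() leaves iov NULL if fast_iov was used */
	return ret;
}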

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	       start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
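
/*
 * Illustrative sketch, not part of this file: the full map/execute/unmap
 * round trip for a single user buffer.  The request is assumed to be a
 * fully prepared passthrough request, blk_execute_rq() stands in for
 * however the caller actually issues it, and the function name is made up.
 */
static int example_user_roundtrip(struct request_queue *q, struct request *rq,
				  void __user *ubuf, unsigned long len)
{
	struct bio *bio;
	int ret;

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		return ret;

	/* rq->bio may change across completion, so save it for unmapping. */
	bio = rq->bio;

	blk_execute_rq(q, NULL, rq, 0);

	/* Copies data back / unpins pages; must run in process context. */
	return blk_rq_unmap_user(bio);
}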

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (!reading)
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	if (do_copy)
		rq->rq_flags |= RQF_COPY_USER;

	ret = blk_rq_append_bio(rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
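
/*
 * Illustrative sketch, not part of this file: issuing a passthrough command
 * over a kernel buffer.  The function name is made up; note that a stack
 * or misaligned buffer would be bounced through bio_copy_kern() rather
 * than mapped directly.
 */
static int example_kernel_map(struct request_queue *q, struct request *rq,
			      void *buf, unsigned int len)
{
	int ret;

	ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (ret)
		return ret;

	blk_execute_rq(q, NULL, rq, 0);
	return 0;
}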