xref: /linux/block/blk-map.c (revision bb9707077b4ee5f77bc9939b057ff8a0d410296f)
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

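/*
 * Append @bio to @rq: start the request with @bio if the request is still
 * empty, otherwise attempt a back merge and, if the queue's merge limits
 * allow it, chain @bio at the tail and grow the request's data length.
 */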
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->__data_len += bio->bi_iter.bi_size;
	}
	return 0;
}

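/*
 * Undo a single user mapping: a directly mapped bio has its pages unpinned
 * via bio_unmap_user(), while copied data is written back to user space
 * via bio_uncopy_user().
 */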
static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

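/*
 * Map (or copy) as much of @iter as fits into a single bio and append it
 * to @rq.  blk_rq_map_user_iov() calls this in a loop until the iterator
 * is drained, so one call may consume only part of the iovec.
 */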
static int __blk_rq_map_user_iov(struct request *rq,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask, bool copy)
{
	struct request_queue *q = rq->q;
	struct bio *bio, *orig_bio;
	int ret;

	if (copy)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio_set_flag(bio, BIO_NULL_MAPPED);

	iov_iter_advance(iter, bio->bi_iter.bi_size);
	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later, so we have to get a ref to prevent it from being freed.
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (ret) {
		bio_endio(bio);
		__blk_rq_unmap_user(orig_bio);
		bio_put(bio);
		return ret;
	}

	return 0;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:   pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero-copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as the mapped pages may be out of
 *    the device's reach. It's the caller's responsibility to make sure this
 *    happens. The original bio must be passed back in to blk_rq_unmap_user()
 *    for proper unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret;

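	/*
	 * Fall back to copying through a kernel bounce buffer when the
	 * caller supplied preallocated pages in @map_data, when the user
	 * buffer does not satisfy the queue's DMA alignment and padding
	 * constraints, or when gaps between iovec segments would violate
	 * the queue's virtual boundary mask.
	 */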
	if (map_data)
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;
	return 0;

unmap_rq:
	__blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return -EINVAL;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
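
/*
 * Hedged usage sketch, not part of the original file: roughly how a caller
 * with a user-space iovec might feed it to blk_rq_map_user_iov(). The
 * function name and the __maybe_unused guard are illustrative only, and the
 * request is assumed to have been allocated and to be submitted by the
 * caller. nr_segs is capped at UIO_FASTIOV so that import_iovec() uses the
 * on-stack array and no kfree() of a larger iovec array is needed.
 */
static int __maybe_unused blk_map_user_iov_example(struct request_queue *q,
						   struct request *rq,
						   const struct iovec __user *uvec,
						   unsigned int nr_segs)
{
	struct iovec fast[UIO_FASTIOV], *iov = fast;
	struct iov_iter iter;
	ssize_t bytes;

	if (nr_segs > ARRAY_SIZE(fast))
		return -EINVAL;

	/* Build an iov_iter over the user iovec, checking access rights. */
	bytes = import_iovec(rq_data_dir(rq), uvec, nr_segs,
			     ARRAY_SIZE(fast), &iov, &iter);
	if (bytes < 0)
		return bytes;

	/* Map (or copy) the whole iterator into the request. */
	return blk_rq_map_user_iov(q, rq, NULL, &iter, GFP_KERNEL);
}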

int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	       start of bio list
 *
 * Description:
 *    Unmap a request previously mapped by blk_rq_map_user(). The caller must
 *    supply the rq->bio that was saved when blk_rq_map_user() returned, since
 *    I/O completion may have changed rq->bio since then.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
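		/*
		 * A bounced bio keeps the original user-mapped bio in
		 * bi_private; that is the one that must be unmapped.
		 */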
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);
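
/*
 * Hedged usage sketch, not part of the original file: how a passthrough-
 * style caller might pair blk_rq_map_user() with blk_rq_unmap_user().
 * Request allocation, command setup, submission and completion handling
 * are elided; the function name and __maybe_unused guard are illustrative.
 */
static int __maybe_unused blk_map_user_example(struct request_queue *q,
					       struct request *rq,
					       void __user *ubuf,
					       unsigned long len)
{
	struct bio *bio;
	int ret;

	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		return ret;

	/* Remember rq->bio now; I/O completion may change it later. */
	bio = rq->bio;

	/* ... submit the request and wait for it to complete here ... */

	return blk_rq_unmap_user(bio);
}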

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (!reading)
		bio->bi_rw |= REQ_WRITE;

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	ret = blk_rq_append_bio(q, rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
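
/*
 * Hedged usage sketch, not part of the original file: attaching a kernel
 * buffer to a passthrough request with blk_rq_map_kern(). Command setup
 * and submission are elided; the function name and __maybe_unused guard
 * are illustrative only.
 */
static int __maybe_unused blk_map_kern_example(struct request_queue *q,
					       struct request *rq,
					       void *buf, unsigned int len)
{
	int ret;

	/* A stack buffer or a misaligned one forces a copy into a bounce bio. */
	ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (ret)
		return ret;

	/* ... fill in the command, submit the request and wait here ... */
	return 0;
}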