// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/sched/task_stack.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>

#include "blk.h"

/*
 * Append a bio to a passthrough request.  Only works if the bio can be
 * merged into the request based on the driver's constraints.
 */
int blk_rq_append_bio(struct request *rq, struct bio *bio)
{
	blk_queue_bounce(rq->q, &bio);

	if (!rq->bio) {
		blk_rq_bio_prep(rq->q, rq, bio);
	} else {
		if (!ll_back_merge_fn(rq->q, rq, bio))
			return -EINVAL;

		rq->biotail->bi_next = bio;
		rq->biotail = bio;
		rq->__data_len += bio->bi_iter.bi_size;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);

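/*
 * Usage sketch (illustrative only, not part of this file): a driver that
 * has already allocated a passthrough request and built its own bio could
 * attach the bio as below.  The request "rq" and bio "bio" are assumed to
 * be supplied by the caller; -EINVAL means the bio could not be merged
 * within the driver's limits.
 *
 *	static int attach_bio(struct request *rq, struct bio *bio)
 *	{
 *		int ret;
 *
 *		ret = blk_rq_append_bio(rq, bio);
 *		if (ret)
 *			pr_err("bio does not fit into the request\n");
 *		return ret;
 *	}
 */
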
/* Undo a user mapping: unpin user pages or copy back a bounce buffer. */
static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user_iov(struct request *rq,
		struct rq_map_data *map_data, struct iov_iter *iter,
		gfp_t gfp_mask, bool copy)
{
	struct request_queue *q = rq->q;
	struct bio *bio, *orig_bio;
	int ret;

	if (copy)
		bio = bio_copy_user_iov(q, map_data, iter, gfp_mask);
	else
		bio = bio_map_user_iov(q, iter, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	if (map_data && map_data->null_mapped)
		bio_set_flag(bio, BIO_NULL_MAPPED);

	iov_iter_advance(iter, bio->bi_iter.bi_size);
	if (map_data)
		map_data->offset += bio->bi_iter.bi_size;

	orig_bio = bio;

	/*
	 * We link the bounce buffer in and may have to traverse it
	 * later, so we take a reference to prevent it from being freed.
	 */
	ret = blk_rq_append_bio(rq, bio);
	bio_get(bio);
	if (ret) {
		bio_endio(bio);
		__blk_rq_unmap_user(orig_bio);
		bio_put(bio);
		return ret;
	}

	return 0;
}

/**
 * blk_rq_map_user_iov - map user data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iter:	iovec iterator
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data,
			const struct iov_iter *iter, gfp_t gfp_mask)
{
	bool copy = false;
	unsigned long align = q->dma_pad_mask | queue_dma_alignment(q);
	struct bio *bio = NULL;
	struct iov_iter i;
	int ret;

	if (!iter_is_iovec(iter))
		goto fail;

	if (map_data)
		copy = true;
	else if (iov_iter_alignment(iter) & align)
		copy = true;
	else if (queue_virt_boundary(q))
		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);

	i = *iter;
	do {
		ret = __blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy);
		if (ret)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
	} while (iov_iter_count(&i));

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->rq_flags |= RQF_COPY_USER;
	return 0;

unmap_rq:
	__blk_rq_unmap_user(bio);
fail:
	rq->bio = NULL;
	return -EINVAL;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);

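/*
 * Usage sketch (illustrative only): a passthrough driver that receives a
 * user iovec, as sg/bsg-style code does, could build the iov_iter with
 * import_iovec() and hand it to blk_rq_map_user_iov().  "q", "rq", "uvec"
 * and "nr_segs" are assumed to be supplied by the caller.
 *
 *	struct iovec fast_iov[UIO_FASTIOV], *iov = fast_iov;
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = import_iovec(rq_data_dir(rq), uvec, nr_segs,
 *			   ARRAY_SIZE(fast_iov), &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = blk_rq_map_user_iov(q, rq, NULL, &iter, GFP_KERNEL);
 *	kfree(iov);
 */
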
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	struct iovec iov;
	struct iov_iter i;
	int ret = import_single_range(rq_data_dir(rq), ubuf, len, &iov, &i);

	if (unlikely(ret < 0))
		return ret;

	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
}
EXPORT_SYMBOL(blk_rq_map_user);

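/*
 * Usage sketch (illustrative only): mapping a single flat user buffer.
 * The bio installed by the mapping must be remembered before the request
 * is executed, because completion may change rq->bio; it is what gets
 * handed back to blk_rq_unmap_user() later.  "q", "rq", "ubuf" and "len"
 * are assumed to come from the caller.
 *
 *	struct bio *bio;
 *	int ret;
 *
 *	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	bio = rq->bio;	// keep for blk_rq_unmap_user()
 */
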
/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	       start of bio list
 *
 * Description:
 *    Unmap a request previously mapped by blk_rq_map_user(). The caller must
 *    supply the rq->bio that was in place right after blk_rq_map_user()
 *    returned, since I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

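/*
 * Usage sketch (illustrative only): a complete passthrough cycle around a
 * request "rq" that has already been set up, e.g. via blk_get_request().
 * The bio saved right after mapping is the one that must be unmapped,
 * regardless of how completion rewrote rq->bio.  "q", "ubuf" and "len" are
 * assumed to come from the caller.
 *
 *	struct bio *bio;
 *	int ret;
 *
 *	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *	if (ret)
 *		goto out;
 *	bio = rq->bio;
 *
 *	blk_execute_rq(q, NULL, rq, 0);		// issue and wait
 *
 *	ret = blk_rq_unmap_user(bio);		// unpin or copy back
 * out:
 *	blk_put_request(rq);
 */
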
/**
 * blk_rq_map_kern - map kernel data to a request, for passthrough requests
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_opf &= ~REQ_OP_MASK;
	bio->bi_opf |= req_op(rq);

	if (do_copy)
		rq->rq_flags |= RQF_COPY_USER;

	ret = blk_rq_append_bio(rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);

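/*
 * Usage sketch (illustrative only): sending a kernel buffer to a device
 * as a passthrough request.  A buffer that fails the alignment check or
 * lives on the stack is automatically copied through a bounce buffer.
 * "q", "rq", "cmd_buf" and "cmd_len" are assumed to be provided by the
 * caller.
 *
 *	int ret;
 *
 *	ret = blk_rq_map_kern(q, rq, cmd_buf, cmd_len, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	blk_execute_rq(q, NULL, rq, 0);
 */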