/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

#include "blk.h"

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->data_len += bio->bi_size;
	}
	return 0;
}
EXPORT_SYMBOL(blk_rq_append_bio);

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
			     struct rq_map_data *map_data, void __user *ubuf,
			     unsigned int len, int null_mapped, gfp_t gfp_mask)
{
	unsigned long uaddr;
	struct bio *bio, *orig_bio;
	int reading, ret;

	reading = rq_data_dir(rq) == READ;

	/*
	 * if the alignment requirement is satisfied, map in user pages for
	 * direct dma. else, set up kernel bounce buffers
	 */
	uaddr = (unsigned long) ubuf;
	if (blk_rq_aligned(q, ubuf, len) && !map_data)
		bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
	else
		bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (null_mapped)
		bio->bi_flags |= (1 << BIO_NULL_MAPPED);

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later, so we have to get a ref to prevent it from being freed
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (!ret)
		return bio->bi_size;

	/* if it was bounced we must call the end io function */
	bio_endio(bio, 0);
	__blk_rq_unmap_user(orig_bio);
	bio_put(bio);
	return ret;
}
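/*
 * Illustrative sketch for blk_rq_append_bio() above, not part of this
 * file: a caller that has built its own bio, for example with
 * bio_map_kern(), can attach it to an existing request like this
 * ("buf" and "len" are placeholder names; error handling beyond the
 * calls shown is trimmed):
 *
 *	bio = bio_map_kern(q, buf, len, GFP_KERNEL);
 *	if (IS_ERR(bio))
 *		return PTR_ERR(bio);
 *
 *	ret = blk_rq_append_bio(q, rq, bio);
 *	if (ret) {
 *		bio_put(bio);
 *		return ret;
 *	}
 */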
/**
 * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q: request queue where request should be inserted
 * @rq: request structure to fill
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @ubuf: the user buffer
 * @len: length of user data
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	unsigned long bytes_read = 0;
	struct bio *bio = NULL;
	int ret, null_mapped = 0;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len)
		return -EINVAL;
	if (!ubuf) {
		if (!map_data || rq_data_dir(rq) != READ)
			return -EINVAL;
		null_mapped = 1;
	}

	while (bytes_read != len) {
		unsigned long map_len, end, start;

		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
		start = (unsigned long)ubuf >> PAGE_SHIFT;

		/*
		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
		 * pages. If this happens we just lower the requested
		 * mapping len by a page so that we can fit
		 */
		if (end - start > BIO_MAX_PAGES)
			map_len -= PAGE_SIZE;

		ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
					null_mapped, gfp_mask);
		if (ret < 0)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
		bytes_read += ret;
		ubuf += ret;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	rq->buffer = rq->data = NULL;
	return 0;
unmap_rq:
	blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);
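/*
 * Illustrative usage sketch, not part of this file: a typical
 * REQ_TYPE_BLOCK_PC submitter (along the lines of the SG_IO ioctl
 * path) maps the user buffer, remembers the original rq->bio before
 * executing the request, and unmaps it afterwards.  "bd_disk", "ubuf"
 * and "len" are placeholder names; CDB setup and most error handling
 * are trimmed.
 *
 *	struct request *rq;
 *	struct bio *bio;
 *	int ret;
 *
 *	rq = blk_get_request(q, WRITE, GFP_KERNEL);	(READ or WRITE)
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *	(fill in rq->cmd[], rq->cmd_len, rq->timeout, rq->sense here)
 *
 *	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *	if (ret) {
 *		blk_put_request(rq);
 *		return ret;
 *	}
 *	bio = rq->bio;		(save it: completion may change rq->bio)
 *
 *	blk_execute_rq(q, bd_disk, rq, 0);
 *
 *	ret = blk_rq_unmap_user(bio);
 *	blk_put_request(rq);
 */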
/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q: request queue where request should be inserted
 * @rq: request to map data to
 * @map_data: pointer to the rq_map_data holding pages (if necessary)
 * @iov: pointer to the iovec
 * @iov_count: number of elements in the iovec
 * @len: I/O byte count
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data, struct sg_iovec *iov,
			int iov_count, unsigned int len, gfp_t gfp_mask)
{
	struct bio *bio;
	int i, read = rq_data_dir(rq) == READ;
	int unaligned = 0;

	if (!iov || iov_count <= 0)
		return -EINVAL;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;

		if (uaddr & queue_dma_alignment(q)) {
			unaligned = 1;
			break;
		}
	}

	if (unaligned || (q->dma_pad_mask & len) || map_data)
		bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
					gfp_mask);
	else
		bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_size != len) {
		bio_endio(bio, 0);
		bio_unmap_user(bio);
		return -EINVAL;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	blk_queue_bounce(q, &bio);
	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);

/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio: start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q: request queue where request should be inserted
 * @rq: request to fill
 * @kbuf: the kernel buffer
 * @len: length of kernel data
 * @gfp_mask: memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	int do_copy = 0;
	struct bio *bio;

	if (len > (q->max_hw_sectors << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= (1 << BIO_RW);

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	blk_rq_bio_prep(q, rq, bio);
	blk_queue_bounce(q, &rq->bio);
	rq->buffer = rq->data = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
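/*
 * Illustrative usage sketch for blk_rq_map_kern(), not part of this
 * file: a driver issuing a command with a kernel buffer, roughly as
 * the SCSI layer does in scsi_execute().  "buffer", "bufflen" and
 * "reading" are placeholder names; CDB setup and most error handling
 * are trimmed.
 *
 *	struct request *rq;
 *	int ret;
 *
 *	rq = blk_get_request(q, reading ? READ : WRITE, GFP_NOIO);
 *	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 *
 *	if (bufflen) {
 *		ret = blk_rq_map_kern(q, rq, buffer, bufflen, GFP_NOIO);
 *		if (ret)
 *			goto out_put;
 *	}
 *
 *	blk_execute_rq(q, NULL, rq, 1);
 *
 * out_put:
 *	blk_put_request(rq);
 */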