xref: /linux/block/blk-map.c (revision 6561f0e547be221f411fda5eddfcc5bd8bb058a5)
1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * Functions related to mapping data to requests
4   */
5  #include <linux/kernel.h>
6  #include <linux/sched/task_stack.h>
7  #include <linux/module.h>
8  #include <linux/bio.h>
9  #include <linux/blkdev.h>
10  #include <linux/uio.h>
11  
12  #include "blk.h"
13  
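/*
 * Book-keeping for a bounce-buffered ("copy") mapping: a deep copy of the
 * caller's iov_iter, including its iovec array, so that the data can be
 * copied back to user space and the bounce pages freed at completion time.
 * bio->bi_private points at this structure (see bio_uncopy_user()).
 */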
14  struct bio_map_data {
15  	bool is_our_pages : 1;
16  	bool is_null_mapped : 1;
17  	struct iov_iter iter;
18  	struct iovec iov[];
19  };
20  
21  static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
22  					       gfp_t gfp_mask)
23  {
24  	struct bio_map_data *bmd;
25  
26  	if (data->nr_segs > UIO_MAXIOV)
27  		return NULL;
28  
29  	bmd = kmalloc(struct_size(bmd, iov, data->nr_segs), gfp_mask);
30  	if (!bmd)
31  		return NULL;
32  	bmd->iter = *data;
33  	if (iter_is_iovec(data)) {
34  		memcpy(bmd->iov, iter_iov(data), sizeof(struct iovec) * data->nr_segs);
35  		bmd->iter.__iov = bmd->iov;
36  	}
37  	return bmd;
38  }
39  
40  /**
41   * bio_copy_from_iter - copy all pages from iov_iter to bio
42   * @bio: The &struct bio which describes the I/O as destination
43   * @iter: iov_iter as source
44   *
45   * Copy all pages from iov_iter to bio.
46   * Returns 0 on success, or -EFAULT if a segment could not be copied in full.
47   */
48  static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
49  {
50  	struct bio_vec *bvec;
51  	struct bvec_iter_all iter_all;
52  
53  	bio_for_each_segment_all(bvec, bio, iter_all) {
54  		ssize_t ret;
55  
56  		ret = copy_page_from_iter(bvec->bv_page,
57  					  bvec->bv_offset,
58  					  bvec->bv_len,
59  					  iter);
60  
61  		if (!iov_iter_count(iter))
62  			break;
63  
64  		if (ret < bvec->bv_len)
65  			return -EFAULT;
66  	}
67  
68  	return 0;
69  }
70  
71  /**
72   * bio_copy_to_iter - copy all pages from bio to iov_iter
73   * @bio: The &struct bio which describes the I/O as source
74   * @iter: iov_iter as destination
75   *
76   * Copy all pages from bio to iov_iter.
77   * Returns 0 on success, or -EFAULT if a segment could not be copied in full.
78   */
79  static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
80  {
81  	struct bio_vec *bvec;
82  	struct bvec_iter_all iter_all;
83  
84  	bio_for_each_segment_all(bvec, bio, iter_all) {
85  		ssize_t ret;
86  
87  		ret = copy_page_to_iter(bvec->bv_page,
88  					bvec->bv_offset,
89  					bvec->bv_len,
90  					&iter);
91  
92  		if (!iov_iter_count(&iter))
93  			break;
94  
95  		if (ret < bvec->bv_len)
96  			return -EFAULT;
97  	}
98  
99  	return 0;
100  }
101  
102  /**
103   *	bio_uncopy_user	-	finish previously mapped bio
104   *	@bio: bio being terminated
105   *
106   *	Free pages allocated from bio_copy_user_iov() and write back data
107   *	to user space in case of a read.
108   */
109  static int bio_uncopy_user(struct bio *bio)
110  {
111  	struct bio_map_data *bmd = bio->bi_private;
112  	int ret = 0;
113  
114  	if (!bmd->is_null_mapped) {
115  		/*
116  		 * if we're in a workqueue, the request is orphaned, so
117  		 * don't copy into a random user address space, just free
118  		 * and return -EINTR so user space doesn't expect any data.
119  		 */
120  		if (!current->mm)
121  			ret = -EINTR;
122  		else if (bio_data_dir(bio) == READ)
123  			ret = bio_copy_to_iter(bio, bmd->iter);
124  		if (bmd->is_our_pages)
125  			bio_free_pages(bio);
126  	}
127  	kfree(bmd);
128  	return ret;
129  }
130  
131  static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
132  		struct iov_iter *iter, gfp_t gfp_mask)
133  {
134  	struct bio_map_data *bmd;
135  	struct page *page;
136  	struct bio *bio;
137  	int i = 0, ret;
138  	int nr_pages;
139  	unsigned int len = iter->count;
140  	unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;
141  
142  	bmd = bio_alloc_map_data(iter, gfp_mask);
143  	if (!bmd)
144  		return -ENOMEM;
145  
146  	/*
147  	 * We need to do a deep copy of the iov_iter including the iovecs.
148  	 * The caller-provided iov might point to an on-stack or otherwise
149  	 * short-lived one.
150  	 */
151  	bmd->is_our_pages = !map_data;
152  	bmd->is_null_mapped = (map_data && map_data->null_mapped);
153  
154  	nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));
155  
156  	ret = -ENOMEM;
157  	bio = bio_kmalloc(nr_pages, gfp_mask);
158  	if (!bio)
159  		goto out_bmd;
160  	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, req_op(rq));
161  
162  	if (map_data) {
163  		nr_pages = 1U << map_data->page_order;
164  		i = map_data->offset / PAGE_SIZE;
165  	}
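	/*
	 * Fill the bio one page-sized chunk at a time, taking pages either
	 * from the caller's preallocated map_data->pages array or from fresh
	 * bounce-page allocations.
	 */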
166  	while (len) {
167  		unsigned int bytes = PAGE_SIZE;
168  
169  		bytes -= offset;
170  
171  		if (bytes > len)
172  			bytes = len;
173  
174  		if (map_data) {
175  			if (i == map_data->nr_entries * nr_pages) {
176  				ret = -ENOMEM;
177  				goto cleanup;
178  			}
179  
180  			page = map_data->pages[i / nr_pages];
181  			page += (i % nr_pages);
182  
183  			i++;
184  		} else {
185  			page = alloc_page(GFP_NOIO | gfp_mask);
186  			if (!page) {
187  				ret = -ENOMEM;
188  				goto cleanup;
189  			}
190  		}
191  
192  		if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) {
193  			if (!map_data)
194  				__free_page(page);
195  			break;
196  		}
197  
198  		len -= bytes;
199  		offset = 0;
200  	}
201  
202  	if (map_data)
203  		map_data->offset += bio->bi_iter.bi_size;
204  
205  	/*
206  	 * Pages are in place: copy the data in, or zero-fill, as needed.
207  	 */
208  	if (iov_iter_rw(iter) == WRITE &&
209  	     (!map_data || !map_data->null_mapped)) {
210  		ret = bio_copy_from_iter(bio, iter);
211  		if (ret)
212  			goto cleanup;
213  	} else if (map_data && map_data->from_user) {
214  		struct iov_iter iter2 = *iter;
215  
216  		/* This is the copy-in part of SG_DXFER_TO_FROM_DEV. */
217  		iter2.data_source = ITER_SOURCE;
218  		ret = bio_copy_from_iter(bio, &iter2);
219  		if (ret)
220  			goto cleanup;
221  	} else {
222  		if (bmd->is_our_pages)
223  			zero_fill_bio(bio);
224  		iov_iter_advance(iter, bio->bi_iter.bi_size);
225  	}
226  
227  	bio->bi_private = bmd;
228  
229  	ret = blk_rq_append_bio(rq, bio);
230  	if (ret)
231  		goto cleanup;
232  	return 0;
233  cleanup:
234  	if (!map_data)
235  		bio_free_pages(bio);
236  	bio_uninit(bio);
237  	kfree(bio);
238  out_bmd:
239  	kfree(bmd);
240  	return ret;
241  }
242  
243  static void blk_mq_map_bio_put(struct bio *bio)
244  {
245  	if (bio->bi_opf & REQ_ALLOC_CACHE) {
246  		bio_put(bio);
247  	} else {
248  		bio_uninit(bio);
249  		kfree(bio);
250  	}
251  }
252  
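/*
 * Allocate the bio for a direct mapping: use the per-cpu bio cache when the
 * request asked for REQ_ALLOC_CACHE and the inline vectors suffice, otherwise
 * fall back to a plain kmalloc()ed bio.
 */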
253  static struct bio *blk_rq_map_bio_alloc(struct request *rq,
254  		unsigned int nr_vecs, gfp_t gfp_mask)
255  {
256  	struct bio *bio;
257  
258  	if (rq->cmd_flags & REQ_ALLOC_CACHE && (nr_vecs <= BIO_INLINE_VECS)) {
259  		bio = bio_alloc_bioset(NULL, nr_vecs, rq->cmd_flags, gfp_mask,
260  					&fs_bio_set);
261  		if (!bio)
262  			return NULL;
263  	} else {
264  		bio = bio_kmalloc(nr_vecs, gfp_mask);
265  		if (!bio)
266  			return NULL;
267  		bio_init(bio, NULL, bio->bi_inline_vecs, nr_vecs, req_op(rq));
268  	}
269  	return bio;
270  }
271  
272  static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
273  		gfp_t gfp_mask)
274  {
275  	iov_iter_extraction_t extraction_flags = 0;
276  	unsigned int max_sectors = queue_max_hw_sectors(rq->q);
277  	unsigned int nr_vecs = iov_iter_npages(iter, BIO_MAX_VECS);
278  	struct bio *bio;
279  	int ret;
280  	int j;
281  
282  	if (!iov_iter_count(iter))
283  		return -EINVAL;
284  
285  	bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
286  	if (bio == NULL)
287  		return -ENOMEM;
288  
289  	if (blk_queue_pci_p2pdma(rq->q))
290  		extraction_flags |= ITER_ALLOW_P2PDMA;
291  	if (iov_iter_extract_will_pin(iter))
292  		bio_set_flag(bio, BIO_PAGE_PINNED);
293  
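	/*
	 * Extract the user pages in batches of up to nr_vecs and add them
	 * to the bio, honouring the hardware sector limit.
	 */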
294  	while (iov_iter_count(iter)) {
295  		struct page *stack_pages[UIO_FASTIOV];
296  		struct page **pages = stack_pages;
297  		ssize_t bytes;
298  		size_t offs;
299  		int npages;
300  
301  		if (nr_vecs > ARRAY_SIZE(stack_pages))
302  			pages = NULL;
303  
304  		bytes = iov_iter_extract_pages(iter, &pages, LONG_MAX,
305  					       nr_vecs, extraction_flags, &offs);
306  		if (unlikely(bytes <= 0)) {
307  			ret = bytes ? bytes : -EFAULT;
308  			goto out_unmap;
309  		}
310  
311  		npages = DIV_ROUND_UP(offs + bytes, PAGE_SIZE);
312  
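		/*
		 * An offset that violates the queue's DMA alignment cannot be
		 * mapped directly; leave j at 0 so every page extracted above
		 * is released again and the leftover bytes are reverted.
		 */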
313  		if (unlikely(offs & queue_dma_alignment(rq->q)))
314  			j = 0;
315  		else {
316  			for (j = 0; j < npages; j++) {
317  				struct page *page = pages[j];
318  				unsigned int n = PAGE_SIZE - offs;
319  				bool same_page = false;
320  
321  				if (n > bytes)
322  					n = bytes;
323  
324  				if (!bio_add_hw_page(rq->q, bio, page, n, offs,
325  						     max_sectors, &same_page))
326  					break;
327  
328  				if (same_page)
329  					bio_release_page(bio, page);
330  				bytes -= n;
331  				offs = 0;
332  			}
333  		}
334  		/*
335  		 * release the pages we didn't map into the bio, if any
336  		 */
337  		while (j < npages)
338  			bio_release_page(bio, pages[j++]);
339  		if (pages != stack_pages)
340  			kvfree(pages);
341  		/* couldn't stuff something into bio? */
342  		if (bytes) {
343  			iov_iter_revert(iter, bytes);
344  			break;
345  		}
346  	}
347  
348  	ret = blk_rq_append_bio(rq, bio);
349  	if (ret)
350  		goto out_unmap;
351  	return 0;
352  
353   out_unmap:
354  	bio_release_pages(bio, false);
355  	blk_mq_map_bio_put(bio);
356  	return ret;
357  }
358  
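/*
 * On architectures with aliasing caches, data the device wrote into a
 * vmalloc()ed buffer has to be invalidated before the kernel reads it through
 * the vmalloc mapping.  bio_map_kern() stashes the original vmalloc address
 * in bio->bi_private for this purpose.
 */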
359  static void bio_invalidate_vmalloc_pages(struct bio *bio)
360  {
361  #ifdef ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE
362  	if (bio->bi_private && !op_is_write(bio_op(bio))) {
363  		unsigned long i, len = 0;
364  
365  		for (i = 0; i < bio->bi_vcnt; i++)
366  			len += bio->bi_io_vec[i].bv_len;
367  		invalidate_kernel_vmap_range(bio->bi_private, len);
368  	}
369  #endif
370  }
371  
372  static void bio_map_kern_endio(struct bio *bio)
373  {
374  	bio_invalidate_vmalloc_pages(bio);
375  	bio_uninit(bio);
376  	kfree(bio);
377  }
378  
379  /**
380   *	bio_map_kern	-	map kernel address into bio
381   *	@q: the struct request_queue for the bio
382   *	@data: pointer to buffer to map
383   *	@len: length in bytes
384   *	@gfp_mask: allocation flags for bio allocation
385   *
386   *	Map the kernel address into a bio suitable for I/O to a block
387   *	device. Returns an error pointer in case of error.
388   */
389  static struct bio *bio_map_kern(struct request_queue *q, void *data,
390  		unsigned int len, gfp_t gfp_mask)
391  {
392  	unsigned long kaddr = (unsigned long)data;
393  	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
394  	unsigned long start = kaddr >> PAGE_SHIFT;
395  	const int nr_pages = end - start;
396  	bool is_vmalloc = is_vmalloc_addr(data);
397  	struct page *page;
398  	int offset, i;
399  	struct bio *bio;
400  
401  	bio = bio_kmalloc(nr_pages, gfp_mask);
402  	if (!bio)
403  		return ERR_PTR(-ENOMEM);
404  	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);
405  
406  	if (is_vmalloc) {
407  		flush_kernel_vmap_range(data, len);
408  		bio->bi_private = data;
409  	}
410  
411  	offset = offset_in_page(kaddr);
412  	for (i = 0; i < nr_pages; i++) {
413  		unsigned int bytes = PAGE_SIZE - offset;
414  
415  		if (len <= 0)
416  			break;
417  
418  		if (bytes > len)
419  			bytes = len;
420  
421  		if (!is_vmalloc)
422  			page = virt_to_page(data);
423  		else
424  			page = vmalloc_to_page(data);
425  		if (bio_add_pc_page(q, bio, page, bytes,
426  				    offset) < bytes) {
427  			/* we don't support partial mappings */
428  			bio_uninit(bio);
429  			kfree(bio);
430  			return ERR_PTR(-EINVAL);
431  		}
432  
433  		data += bytes;
434  		len -= bytes;
435  		offset = 0;
436  	}
437  
438  	bio->bi_end_io = bio_map_kern_endio;
439  	return bio;
440  }
441  
442  static void bio_copy_kern_endio(struct bio *bio)
443  {
444  	bio_free_pages(bio);
445  	bio_uninit(bio);
446  	kfree(bio);
447  }
448  
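/*
 * Read completion for a copied kernel mapping: copy the bounce pages back
 * into the original buffer (stashed in bi_private), then free them.
 */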
449  static void bio_copy_kern_endio_read(struct bio *bio)
450  {
451  	char *p = bio->bi_private;
452  	struct bio_vec *bvec;
453  	struct bvec_iter_all iter_all;
454  
455  	bio_for_each_segment_all(bvec, bio, iter_all) {
456  		memcpy_from_bvec(p, bvec);
457  		p += bvec->bv_len;
458  	}
459  
460  	bio_copy_kern_endio(bio);
461  }
462  
463  /**
464   *	bio_copy_kern	-	copy kernel address into bio
465   *	@q: the struct request_queue for the bio
466   *	@data: pointer to buffer to copy
467   *	@len: length in bytes
468   *	@gfp_mask: allocation flags for bio and page allocation
469   *	@reading: data direction is READ
470   *
471   *	Copy the kernel address into a bio suitable for I/O to a block
472   *	device. Returns an error pointer in case of error.
473   */
474  static struct bio *bio_copy_kern(struct request_queue *q, void *data,
475  		unsigned int len, gfp_t gfp_mask, int reading)
476  {
477  	unsigned long kaddr = (unsigned long)data;
478  	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
479  	unsigned long start = kaddr >> PAGE_SHIFT;
480  	struct bio *bio;
481  	void *p = data;
482  	int nr_pages = 0;
483  
484  	/*
485  	 * Overflow, abort
486  	 */
487  	if (end < start)
488  		return ERR_PTR(-EINVAL);
489  
490  	nr_pages = end - start;
491  	bio = bio_kmalloc(nr_pages, gfp_mask);
492  	if (!bio)
493  		return ERR_PTR(-ENOMEM);
494  	bio_init(bio, NULL, bio->bi_inline_vecs, nr_pages, 0);
495  
496  	while (len) {
497  		struct page *page;
498  		unsigned int bytes = PAGE_SIZE;
499  
500  		if (bytes > len)
501  			bytes = len;
502  
503  		page = alloc_page(GFP_NOIO | __GFP_ZERO | gfp_mask);
504  		if (!page)
505  			goto cleanup;
506  
507  		if (!reading)
508  			memcpy(page_address(page), p, bytes);
509  
510  		if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
511  			break;
512  
513  		len -= bytes;
514  		p += bytes;
515  	}
516  
517  	if (reading) {
518  		bio->bi_end_io = bio_copy_kern_endio_read;
519  		bio->bi_private = data;
520  	} else {
521  		bio->bi_end_io = bio_copy_kern_endio;
522  	}
523  
524  	return bio;
525  
526  cleanup:
527  	bio_free_pages(bio);
528  	bio_uninit(bio);
529  	kfree(bio);
530  	return ERR_PTR(-ENOMEM);
531  }
532  
533  /*
534   * Append a bio to a passthrough request.  Only works if the bio can be merged
535   * into the request based on the driver constraints.
536   */
537  int blk_rq_append_bio(struct request *rq, struct bio *bio)
538  {
539  	struct bvec_iter iter;
540  	struct bio_vec bv;
541  	unsigned int nr_segs = 0;
542  
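	/*
	 * Count the bvec segments of the bio: a fresh request records the
	 * count, while a back-merge checks it against the queue limits.
	 */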
543  	bio_for_each_bvec(bv, bio, iter)
544  		nr_segs++;
545  
546  	if (!rq->bio) {
547  		blk_rq_bio_prep(rq, bio, nr_segs);
548  	} else {
549  		if (!ll_back_merge_fn(rq, bio, nr_segs))
550  			return -EINVAL;
551  		rq->biotail->bi_next = bio;
552  		rq->biotail = bio;
553  		rq->__data_len += (bio)->bi_iter.bi_size;
554  		bio_crypt_free_ctx(bio);
555  	}
556  
557  	return 0;
558  }
559  EXPORT_SYMBOL(blk_rq_append_bio);
560  
561  /* Prepare bio for passthrough IO given ITER_BVEC iter */
562  static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
563  {
564  	const struct queue_limits *lim = &rq->q->limits;
565  	unsigned int max_bytes = lim->max_hw_sectors << SECTOR_SHIFT;
566  	unsigned int nsegs;
567  	struct bio *bio;
568  	int ret;
569  
570  	if (!iov_iter_count(iter) || iov_iter_count(iter) > max_bytes)
571  		return -EINVAL;
572  
573  	/* reuse the bvecs from the iterator instead of allocating new ones */
574  	bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
575  	if (!bio)
576  		return -ENOMEM;
577  	bio_iov_bvec_set(bio, (struct iov_iter *)iter);
578  
579  	/* check that the data layout matches the hardware restrictions */
580  	ret = bio_split_rw_at(bio, lim, &nsegs, max_bytes);
581  	if (ret) {
582  		/* if we would have to split the bio, copy instead */
583  		if (ret > 0)
584  			ret = -EREMOTEIO;
585  		blk_mq_map_bio_put(bio);
586  		return ret;
587  	}
588  
589  	blk_rq_bio_prep(rq, bio, nsegs);
590  	return 0;
591  }
592  
593  /**
594   * blk_rq_map_user_iov - map user data to a request, for passthrough requests
595   * @q:		request queue where request should be inserted
596   * @rq:		request to map data to
597   * @map_data:   pointer to the rq_map_data holding pages (if necessary)
598   * @iter:	iovec iterator
599   * @gfp_mask:	memory allocation flags
600   *
601   * Description:
602   *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
603   *    a kernel bounce buffer is used.
604   *
605   *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
606   *    still in process context.
607   */
608  int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
609  			struct rq_map_data *map_data,
610  			const struct iov_iter *iter, gfp_t gfp_mask)
611  {
612  	bool copy = false, map_bvec = false;
613  	unsigned long align = blk_lim_dma_alignment_and_pad(&q->limits);
614  	struct bio *bio = NULL;
615  	struct iov_iter i;
616  	int ret = -EINVAL;
617  
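	/*
	 * Decide between bounce-copying and mapping directly: copy whenever
	 * the caller supplies its own pages, the queue may bounce, the layout
	 * violates the DMA alignment or virt boundary, or the iter is not
	 * backed by user pages.  ITER_BVEC iterators reuse their bvecs as-is.
	 */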
618  	if (map_data)
619  		copy = true;
620  	else if (blk_queue_may_bounce(q))
621  		copy = true;
622  	else if (iov_iter_alignment(iter) & align)
623  		copy = true;
624  	else if (iov_iter_is_bvec(iter))
625  		map_bvec = true;
626  	else if (!user_backed_iter(iter))
627  		copy = true;
628  	else if (queue_virt_boundary(q))
629  		copy = queue_virt_boundary(q) & iov_iter_gap_alignment(iter);
630  
631  	if (map_bvec) {
632  		ret = blk_rq_map_user_bvec(rq, iter);
633  		if (!ret)
634  			return 0;
635  		if (ret != -EREMOTEIO)
636  			goto fail;
637  		/* fall back to copying the data on limits mismatches */
638  		copy = true;
639  	}
640  
641  	i = *iter;
642  	do {
643  		if (copy)
644  			ret = bio_copy_user_iov(rq, map_data, &i, gfp_mask);
645  		else
646  			ret = bio_map_user_iov(rq, &i, gfp_mask);
647  		if (ret)
648  			goto unmap_rq;
649  		if (!bio)
650  			bio = rq->bio;
651  	} while (iov_iter_count(&i));
652  
653  	return 0;
654  
655  unmap_rq:
656  	blk_rq_unmap_user(bio);
657  fail:
658  	rq->bio = NULL;
659  	return ret;
660  }
661  EXPORT_SYMBOL(blk_rq_map_user_iov);
662  
663  int blk_rq_map_user(struct request_queue *q, struct request *rq,
664  		    struct rq_map_data *map_data, void __user *ubuf,
665  		    unsigned long len, gfp_t gfp_mask)
666  {
667  	struct iov_iter i;
668  	int ret = import_ubuf(rq_data_dir(rq), ubuf, len, &i);
669  
670  	if (unlikely(ret < 0))
671  		return ret;
672  
673  	return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask);
674  }
675  EXPORT_SYMBOL(blk_rq_map_user);
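
/*
 * Typical caller pattern, as a rough sketch only (assumes caller-provided
 * q, ubuf and len; the SG_IO / scsi_ioctl() paths are the classic users,
 * and the opcode and flags depend on the actual command):
 *
 *	struct request *rq;
 *	struct bio *bio;
 *	int ret;
 *
 *	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
 *	if (ret)
 *		goto out_free;
 *	bio = rq->bio;	/* completion may change rq->bio, so save it */
 *	blk_execute_rq(rq, false);
 *	ret = blk_rq_unmap_user(bio);
 * out_free:
 *	blk_mq_free_request(rq);
 *	return ret;
 */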
676  
677  int blk_rq_map_user_io(struct request *req, struct rq_map_data *map_data,
678  		void __user *ubuf, unsigned long buf_len, gfp_t gfp_mask,
679  		bool vec, int iov_count, bool check_iter_count, int rw)
680  {
681  	int ret = 0;
682  
683  	if (vec) {
684  		struct iovec fast_iov[UIO_FASTIOV];
685  		struct iovec *iov = fast_iov;
686  		struct iov_iter iter;
687  
688  		ret = import_iovec(rw, ubuf, iov_count ? iov_count : buf_len,
689  				UIO_FASTIOV, &iov, &iter);
690  		if (ret < 0)
691  			return ret;
692  
693  		if (iov_count) {
694  			/* SG_IO howto says that the shorter of the two wins */
695  			iov_iter_truncate(&iter, buf_len);
696  			if (check_iter_count && !iov_iter_count(&iter)) {
697  				kfree(iov);
698  				return -EINVAL;
699  			}
700  		}
701  
702  		ret = blk_rq_map_user_iov(req->q, req, map_data, &iter,
703  				gfp_mask);
704  		kfree(iov);
705  	} else if (buf_len) {
706  		ret = blk_rq_map_user(req->q, req, map_data, ubuf, buf_len,
707  				gfp_mask);
708  	}
709  	return ret;
710  }
711  EXPORT_SYMBOL(blk_rq_map_user_io);
712  
713  /**
714   * blk_rq_unmap_user - unmap a request with user data
715   * @bio:	       start of bio list
716   *
717   * Description:
718   *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
719   *    supply the original rq->bio from the blk_rq_map_user() return, since
720   *    the I/O completion may have changed rq->bio.
721   */
722  int blk_rq_unmap_user(struct bio *bio)
723  {
724  	struct bio *next_bio;
725  	int ret = 0, ret2;
726  
727  	while (bio) {
728  		if (bio->bi_private) {
729  			ret2 = bio_uncopy_user(bio);
730  			if (ret2 && !ret)
731  				ret = ret2;
732  		} else {
733  			bio_release_pages(bio, bio_data_dir(bio) == READ);
734  		}
735  
736  		if (bio_integrity(bio))
737  			bio_integrity_unmap_user(bio);
738  
739  		next_bio = bio;
740  		bio = bio->bi_next;
741  		blk_mq_map_bio_put(next_bio);
742  	}
743  
744  	return ret;
745  }
746  EXPORT_SYMBOL(blk_rq_unmap_user);
747  
748  /**
749   * blk_rq_map_kern - map kernel data to a request, for passthrough requests
750   * @q:		request queue where request should be inserted
751   * @rq:		request to fill
752   * @kbuf:	the kernel buffer
753   * @len:	length of the data in @kbuf
754   * @gfp_mask:	memory allocation flags
755   *
756   * Description:
757   *    Data will be mapped directly if possible. Otherwise a bounce
758   *    buffer is used. Can be called multiple times to append multiple
759   *    buffers.
760   */
761  int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
762  		    unsigned int len, gfp_t gfp_mask)
763  {
764  	int reading = rq_data_dir(rq) == READ;
765  	unsigned long addr = (unsigned long) kbuf;
766  	struct bio *bio;
767  	int ret;
768  
769  	if (len > (queue_max_hw_sectors(q) << 9))
770  		return -EINVAL;
771  	if (!len || !kbuf)
772  		return -EINVAL;
773  
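	/*
	 * Buffers on the stack or not meeting the queue's DMA alignment
	 * cannot be mapped directly, nor can anything when the queue may
	 * need bouncing; fall back to a copy in those cases.
	 */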
774  	if (!blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf) ||
775  	    blk_queue_may_bounce(q))
776  		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
777  	else
778  		bio = bio_map_kern(q, kbuf, len, gfp_mask);
779  
780  	if (IS_ERR(bio))
781  		return PTR_ERR(bio);
782  
783  	bio->bi_opf &= ~REQ_OP_MASK;
784  	bio->bi_opf |= req_op(rq);
785  
786  	ret = blk_rq_append_bio(rq, bio);
787  	if (unlikely(ret)) {
788  		bio_uninit(bio);
789  		kfree(bio);
790  	}
791  	return ret;
792  }
793  EXPORT_SYMBOL(blk_rq_map_kern);
794