xref: /linux/block/blk-mq-dma.c (revision df9c299371054cb725eef730fd0f1d0fe2ed6bb0)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2025 Christoph Hellwig
 */
#include "blk.h"

struct phys_vec {
	phys_addr_t	paddr;
	u32		len;
};

static bool blk_map_iter_next(struct request *req, struct req_iterator *iter,
			      struct phys_vec *vec)
{
	unsigned int max_size;
	struct bio_vec bv;

	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
		if (!iter->bio)
			return false;
		vec->paddr = bvec_phys(&req->special_vec);
		vec->len = req->special_vec.bv_len;
		iter->bio = NULL;
		return true;
	}

	if (!iter->iter.bi_size)
		return false;

	bv = mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter);
	vec->paddr = bvec_phys(&bv);
	max_size = get_max_segment_size(&req->q->limits, vec->paddr, UINT_MAX);
	bv.bv_len = min(bv.bv_len, max_size);
	bio_advance_iter_single(iter->bio, &iter->iter, bv.bv_len);

	/*
	 * If we are entirely done with this bi_io_vec entry, check if the next
	 * one could be merged into it.  This typically happens when moving to
	 * the next bio, but some callers also don't pack bvecs tightly.
	 */
	while (!iter->iter.bi_size || !iter->iter.bi_bvec_done) {
		struct bio_vec next;

		if (!iter->iter.bi_size) {
			if (!iter->bio->bi_next)
				break;
			iter->bio = iter->bio->bi_next;
			iter->iter = iter->bio->bi_iter;
		}

		next = mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter);
		if (bv.bv_len + next.bv_len > max_size ||
		    !biovec_phys_mergeable(req->q, &bv, &next))
			break;

		bv.bv_len += next.bv_len;
		bio_advance_iter_single(iter->bio, &iter->iter, next.bv_len);
	}

	vec->len = bv.bv_len;
	return true;
}
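
/*
 * Illustrative sketch, not part of the original file: blk_map_iter_next() is
 * meant to be driven in a loop, yielding one physically contiguous segment
 * per call.  The hypothetical helper below merely counts those segments;
 * __blk_rq_map_sg() further down uses the same pattern to fill a
 * scatterlist.
 */
static unsigned int __maybe_unused blk_count_phys_vecs(struct request *req)
{
	struct req_iterator iter = {
		.bio	= req->bio,
	};
	struct phys_vec vec;
	unsigned int nr = 0;

	/* the internal flush request may not have a bio attached */
	if (iter.bio)
		iter.iter = iter.bio->bi_iter;

	while (blk_map_iter_next(req, &iter, &vec))
		nr++;

	return nr;
}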

static inline struct scatterlist *
blk_next_sg(struct scatterlist **sg, struct scatterlist *sglist)
{
	if (!*sg)
		return sglist;

	/*
	 * If the driver previously mapped a shorter list, we could see a
	 * termination bit prematurely unless it fully inits the sg table
	 * on each mapping. We KNOW that there must be more entries here
	 * or the driver would be buggy, so force clear the termination bit
	 * to avoid doing a full sg_init_table() in drivers for each command.
	 */
	sg_unmark_end(*sg);
	return sg_next(*sg);
}
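
/*
 * Illustrative sketch, not part of the original file: the reuse pattern the
 * comment above relies on.  "foo_cmd" and its table size are hypothetical.
 * A driver initializes its scatterlist once when the command is set up;
 * blk_next_sg() clearing stale termination bits is what lets it skip a full
 * sg_init_table() on every subsequent mapping.
 */
struct foo_cmd {
	struct scatterlist sgl[16];	/* sized for the queue's max_segments */
};

static void __maybe_unused foo_sgl_init(struct foo_cmd *cmd)
{
	/* done once at command setup time, not per request */
	sg_init_table(cmd->sgl, ARRAY_SIZE(cmd->sgl));
}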

/*
 * Map a request to a scatterlist, return the number of sg entries set up.
 * The caller must make sure sg can hold rq->nr_phys_segments entries.
 */
int __blk_rq_map_sg(struct request *rq, struct scatterlist *sglist,
		    struct scatterlist **last_sg)
{
	struct req_iterator iter = {
		.bio	= rq->bio,
	};
	struct phys_vec vec;
	int nsegs = 0;

	/* the internal flush request may not have a bio attached */
	if (iter.bio)
		iter.iter = iter.bio->bi_iter;

	while (blk_map_iter_next(rq, &iter, &vec)) {
		*last_sg = blk_next_sg(last_sg, sglist);
		sg_set_page(*last_sg, phys_to_page(vec.paddr), vec.len,
				offset_in_page(vec.paddr));
		nsegs++;
	}

	if (*last_sg)
		sg_mark_end(*last_sg);

	/*
	 * Something must be wrong if the computed number of segments is
	 * bigger than the number of the request's physical segments.
	 */
	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

	return nsegs;
}
EXPORT_SYMBOL(__blk_rq_map_sg);
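
/*
 * Illustrative sketch, not part of the original file: per-request mapping as
 * a driver might do it, reusing the table prepared by foo_sgl_init() above.
 * The command structure and helper names are hypothetical; __blk_rq_map_sg()
 * above is the only interface being exercised.  The caller must guarantee
 * that the table holds at least blk_rq_nr_phys_segments(rq) entries, which
 * the queue's max_segments limit normally ensures.
 */
static int __maybe_unused foo_map_data(struct request *rq,
				       struct foo_cmd *cmd)
{
	struct scatterlist *last_sg = NULL;
	int nsegs;

	nsegs = __blk_rq_map_sg(rq, cmd->sgl, &last_sg);

	/* the nsegs filled entries are now ready for e.g. dma_map_sg() */
	return nsegs;
}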