xref: /linux/block/blk-merge.c (revision e2fc4d19292ef2eb208f76976ddc3320cc5839b6)
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

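/*
 * Advance the request's hard sector/length accounting after @nsect sectors
 * have completed, and resync the soft submission pointers (sector,
 * nr_sectors, buffer) with the current bio when they lag behind the hard
 * ones.
 */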
void blk_recalc_rq_sectors(struct request *rq, int nsect)
{
	if (blk_fs_request(rq) || blk_discard_rq(rq)) {
		rq->hard_sector += nsect;
		rq->hard_nr_sectors -= nsect;

		/*
		 * Move the I/O submission pointers ahead if required.
		 */
		if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
		    (rq->sector <= rq->hard_sector)) {
			rq->sector = rq->hard_sector;
			rq->nr_sectors = rq->hard_nr_sectors;
			rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
			rq->current_nr_sectors = rq->hard_cur_sectors;
			rq->buffer = bio_data(rq->bio);
		}

		/*
		 * if total number of sectors is less than the first segment
		 * size, something has gone terribly wrong
		 */
		if (rq->nr_sectors < rq->current_nr_sectors) {
			printk(KERN_ERR "blk: request botched\n");
			rq->nr_sectors = rq->current_nr_sectors;
		}
	}
}

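/*
 * Walk every bio_vec in the bio chain starting at @bio and count the
 * physical segments it maps to, merging adjacent vecs where the queue
 * supports clustering.  If @seg_size_ptr is non-NULL, the size of the
 * last segment is returned through it.
 */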
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio,
					     unsigned int *seg_size_ptr)
{
	struct bio_vec *bv, *bvprv = NULL;
	int cluster, i, high, highprv = 1;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio;

	if (!bio)
		return 0;

	fbio = bio;
	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, i) {
			/*
			 * the trick here is making sure that a high page is
			 * never considered part of another segment, since that
			 * might change with the bounce page.
			 */
			high = page_to_pfn(bv->bv_page) > q->bounce_pfn;
			if (high || highprv)
				goto new_segment;
			if (cluster) {
				if (seg_size + bv->bv_len > q->max_segment_size)
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
					goto new_segment;

				seg_size += bv->bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			seg_size = bv->bv_len;
			highprv = high;
		}
	}

	if (seg_size_ptr)
		*seg_size_ptr = seg_size;

	return nr_phys_segs;
}

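/*
 * Recompute the physical segment count of @rq and refresh the front/back
 * segment size hints used by the request merging code.
 */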
void blk_recalc_rq_segments(struct request *rq)
{
	unsigned int seg_size = 0, phys_segs;

	phys_segs = __blk_recalc_rq_segments(rq->q, rq->bio, &seg_size);

	if (phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
		rq->bio->bi_seg_front_size = seg_size;
	if (seg_size > rq->biotail->bi_seg_back_size)
		rq->biotail->bi_seg_back_size = seg_size;

	rq->nr_phys_segments = phys_segs;
}

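/*
 * Recount the physical segments of a single bio; ->bi_next is temporarily
 * cleared so chained bios are not included, and the result is marked valid
 * with BIO_SEG_VALID.
 */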
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	struct bio *nxt = bio->bi_next;

	bio->bi_next = NULL;
	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, NULL);
	bio->bi_next = nxt;
	bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

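/*
 * Return 1 if the last segment of @bio and the first segment of @nxt can
 * be folded into a single physical segment on this queue, 0 otherwise.
 */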
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    q->max_segment_size)
		return 0;

	if (!bio_has_data(bio))
		return 1;

	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIO_SEG_BOUNDARY(q, bio, nxt))
		return 1;

	return 0;
}

/*
 * Map a request to a scatterlist, returning the number of sg entries set
 * up.  The caller must make sure sg can hold rq->nr_phys_segments entries.
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct bio_vec *bvec, *bvprv;
	struct req_iterator iter;
	struct scatterlist *sg;
	int nsegs, cluster;

	nsegs = 0;
	cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);

	/*
	 * for each bio in rq
	 */
	bvprv = NULL;
	sg = NULL;
	rq_for_each_segment(bvec, rq, iter) {
		int nbytes = bvec->bv_len;

		if (bvprv && cluster) {
			if (sg->length + nbytes > q->max_segment_size)
				goto new_segment;

			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
				goto new_segment;
			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
				goto new_segment;

			sg->length += nbytes;
		} else {
new_segment:
			if (!sg)
				sg = sglist;
			else {
				/*
				 * If the driver previously mapped a shorter
				 * list, we could see a termination bit
				 * prematurely unless it fully inits the sg
				 * table on each mapping. We KNOW that there
				 * must be more entries here or the driver
				 * would be buggy, so force clear the
				 * termination bit to avoid doing a full
				 * sg_init_table() in drivers for each command.
				 */
				sg->page_link &= ~0x02;
				sg = sg_next(sg);
			}

			sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset);
			nsegs++;
		}
		bvprv = bvec;
	} /* segments in rq */

	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
	    (rq->data_len & q->dma_pad_mask)) {
		unsigned int pad_len = (q->dma_pad_mask & ~rq->data_len) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (rq->cmd_flags & REQ_RW)
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg->page_link &= ~0x02;
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

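/*
 * Illustrative sketch of how a caller would use blk_rq_map_sg(): provide a
 * scatterlist sized for rq->nr_phys_segments entries and let the function
 * fill it.  The array size MY_MAX_SEGMENTS and the surrounding driver
 * context are hypothetical, not part of this file:
 *
 *	struct scatterlist sgl[MY_MAX_SEGMENTS];
 *	int count;
 *
 *	sg_init_table(sgl, MY_MAX_SEGMENTS);
 *	count = blk_rq_map_sg(q, rq, sgl);
 *	// the first 'count' entries are now filled and terminated;
 *	// hand them to the DMA layer, e.g. via dma_map_sg()
 */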
static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > q->max_hw_segments
	    || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}

	/*
	 * The merged bio forms the start of a new segment; bump the
	 * request's physical segment count accordingly.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;
}

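/*
 * Check whether @bio can be appended to the back of @req without exceeding
 * the queue's size and segment limits.
 */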
int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	unsigned short max_sectors;

	if (unlikely(blk_pc_request(req)))
		max_sectors = q->max_hw_sectors;
	else
		max_sectors = q->max_sectors;

	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

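/*
 * Check whether @bio can be merged at the front of @req without exceeding
 * the queue's size and segment limits.
 */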
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	unsigned short max_sectors;

	if (unlikely(blk_pc_request(req)))
		max_sectors = q->max_hw_sectors;
	else
		max_sectors = q->max_sectors;

	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

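/*
 * Check whether @req and @next can be combined into a single request and,
 * if so, update @req's physical segment accounting.
 */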
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check whether either of the requests has been re-queued;
	 * we can't merge them if so.
	 */
	if (req->special || next->special)
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > q->max_phys_segments)
		return 0;

	if (total_phys_segments > q->max_hw_segments)
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
			  struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

	/*
	 * not contiguous
	 */
	if (req->sector + req->nr_sectors != next->sector)
		return 0;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || next->special)
		return 0;

	if (blk_integrity_rq(req) != blk_integrity_rq(next))
		return 0;

	/*
	 * If we are allowed to merge, then append the bio list from next
	 * to rq and release next.  ll_merge_requests_fn() will have
	 * updated the segment counts; update the sector counts here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return 0;

	/*
	 * At this point we have either done a back merge or a front
	 * merge.  For accounting purposes, the merged request keeps the
	 * smaller start_time of the two original requests.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;

	elv_merge_requests(q, req, next);

	if (req->rq_disk) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = disk_map_sector_rcu(req->rq_disk, req->sector);

		part_round_stats(cpu, part);
		part_dec_in_flight(part);

		part_stat_unlock();
	}

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	__blk_put_request(q, next);
	return 1;
}

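/*
 * Try to merge @rq with the request that the elevator reports as following
 * it on disk.
 */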
int attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return 0;
}

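/*
 * Try to merge @rq into the request that the elevator reports as preceding
 * it on disk.
 */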
int attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return 0;
}