blk-core.c (71fe07d040626de7b72244bf6de889c2e0f5aea3, old) vs. blk-core.c (320ae51feed5c2f13664aa05a76bec198967e04d, new)
1/*
2 * Copyright (C) 1991, 1992 Linus Torvalds
3 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
4 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
5 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
6 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
7 * - July2000
8 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
9 */
10
11/*
12 * This handles all read/write requests to block devices
13 */
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/backing-dev.h>
17#include <linux/bio.h>
18#include <linux/blkdev.h>
1/*
2 * Copyright (C) 1991, 1992 Linus Torvalds
3 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics
4 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
5 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
6 * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au>
7 * - July2000
8 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
9 */
10
11/*
12 * This handles all read/write requests to block devices
13 */
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/backing-dev.h>
17#include <linux/bio.h>
18#include <linux/blkdev.h>
19#include <linux/blk-mq.h>
19#include <linux/highmem.h>
20#include <linux/mm.h>
21#include <linux/kernel_stat.h>
22#include <linux/string.h>
23#include <linux/init.h>
24#include <linux/completion.h>
25#include <linux/slab.h>
26#include <linux/swap.h>

--- 16 unchanged lines hidden ---

43EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
44EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
45
46DEFINE_IDA(blk_queue_ida);
47
48/*
49 * For the allocated request tables
50 */
20#include <linux/highmem.h>
21#include <linux/mm.h>
22#include <linux/kernel_stat.h>
23#include <linux/string.h>
24#include <linux/init.h>
25#include <linux/completion.h>
26#include <linux/slab.h>
27#include <linux/swap.h>

--- 16 unchanged lines hidden ---

44EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
45EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
46
47DEFINE_IDA(blk_queue_ida);
48
49/*
50 * For the allocated request tables
51 */
51static struct kmem_cache *request_cachep;
52struct kmem_cache *request_cachep = NULL;
52
53/*
54 * For queue allocation
55 */
56struct kmem_cache *blk_requestq_cachep;
57
58/*
59 * Controlling structure to kblockd
60 */
61static struct workqueue_struct *kblockd_workqueue;
62
53
54/*
55 * For queue allocation
56 */
57struct kmem_cache *blk_requestq_cachep;
58
59/*
60 * Controlling structure to kblockd
61 */
62static struct workqueue_struct *kblockd_workqueue;
63
63static void drive_stat_acct(struct request *rq, int new_io)
64{
65 struct hd_struct *part;
66 int rw = rq_data_dir(rq);
67 int cpu;
68
69 if (!blk_do_io_stat(rq))
70 return;
71
72 cpu = part_stat_lock();
73
74 if (!new_io) {
75 part = rq->part;
76 part_stat_inc(cpu, part, merges[rw]);
77 } else {
78 part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
79 if (!hd_struct_try_get(part)) {
80 /*
81 * The partition is already being removed,
82 * the request will be accounted on the disk only
83 *
84 * We take a reference on disk->part0 although that
85 * partition will never be deleted, so we can treat
86 * it as any other partition.
87 */
88 part = &rq->rq_disk->part0;
89 hd_struct_get(part);
90 }
91 part_round_stats(cpu, part);
92 part_inc_in_flight(part, rw);
93 rq->part = part;
94 }
95
96 part_stat_unlock();
97}
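/*
 * Not part of either revision: in the newer tree this helper is gone from
 * here and reappears further down, verbatim, as the non-static
 * blk_account_io_start(); every drive_stat_acct() call site below switches
 * to blk_account_io_start() accordingly.
 */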
98
99void blk_queue_congestion_threshold(struct request_queue *q)
100{
101 int nr;
102
103 nr = q->nr_requests - (q->nr_requests / 8) + 1;
104 if (nr > q->nr_requests)
105 nr = q->nr_requests;
106 q->nr_congestion_on = nr;

--- 482 unchanged lines hidden ---

589 struct request_queue *q;
590 int err;
591
592 q = kmem_cache_alloc_node(blk_requestq_cachep,
593 gfp_mask | __GFP_ZERO, node_id);
594 if (!q)
595 return NULL;
596
64void blk_queue_congestion_threshold(struct request_queue *q)
65{
66 int nr;
67
68 nr = q->nr_requests - (q->nr_requests / 8) + 1;
69 if (nr > q->nr_requests)
70 nr = q->nr_requests;
71 q->nr_congestion_on = nr;

--- 482 unchanged lines hidden (view full) ---

554 struct request_queue *q;
555 int err;
556
557 q = kmem_cache_alloc_node(blk_requestq_cachep,
558 gfp_mask | __GFP_ZERO, node_id);
559 if (!q)
560 return NULL;
561
562 if (percpu_counter_init(&q->mq_usage_counter, 0))
563 goto fail_q;
564
597 q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
598 if (q->id < 0)
565 q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
566 if (q->id < 0)
599 goto fail_q;
567 goto fail_c;
600
601 q->backing_dev_info.ra_pages =
602 (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
603 q->backing_dev_info.state = 0;
604 q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
605 q->backing_dev_info.name = "block";
606 q->node = node_id;
607

--- 30 unchanged lines hidden ---

638 * A queue starts its life with bypass turned on to avoid
639 * unnecessary bypass on/off overhead and nasty surprises during
640 * init. The initial bypass will be finished when the queue is
641 * registered by blk_register_queue().
642 */
643 q->bypass_depth = 1;
644 __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
645
568
569 q->backing_dev_info.ra_pages =
570 (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
571 q->backing_dev_info.state = 0;
572 q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
573 q->backing_dev_info.name = "block";
574 q->node = node_id;
575

--- 30 unchanged lines hidden ---

606 * A queue starts its life with bypass turned on to avoid
607 * unnecessary bypass on/off overhead and nasty surprises during
608 * init. The initial bypass will be finished when the queue is
609 * registered by blk_register_queue().
610 */
611 q->bypass_depth = 1;
612 __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
613
614 init_waitqueue_head(&q->mq_freeze_wq);
615
646 if (blkcg_init_queue(q))
647 goto fail_id;
648
649 return q;
650
651fail_id:
652 ida_simple_remove(&blk_queue_ida, q->id);
616 if (blkcg_init_queue(q))
617 goto fail_id;
618
619 return q;
620
621fail_id:
622 ida_simple_remove(&blk_queue_ida, q->id);
623fail_c:
624 percpu_counter_destroy(&q->mq_usage_counter);
653fail_q:
654 kmem_cache_free(blk_requestq_cachep, q);
655 return NULL;
656}
657EXPORT_SYMBOL(blk_alloc_queue_node);
658
659/**
660 * blk_init_queue - prepare a request queue for use with a block device

--- 442 unchanged lines hidden ---

1103 ioc_set_batching(q, current->io_context);
1104
1105 spin_lock_irq(q->queue_lock);
1106 finish_wait(&rl->wait[is_sync], &wait);
1107
1108 goto retry;
1109}
1110
625fail_q:
626 kmem_cache_free(blk_requestq_cachep, q);
627 return NULL;
628}
629EXPORT_SYMBOL(blk_alloc_queue_node);
630
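/*
 * Illustrative sketch, not part of either revision: the newer
 * blk_alloc_queue_node() above adds a per-cpu counter (mq_usage_counter)
 * and a waitqueue (mq_freeze_wq).  The shape below is the generic
 * percpu_counter idiom that pairing suggests: cheap per-cpu updates on
 * the hot path, an exact sum only when a (presumed) drain/freeze side
 * needs it.  All names here are hypothetical; the sketch assumes
 * <linux/percpu_counter.h> and <linux/wait.h>.
 */
struct example_usage_gate {
	struct percpu_counter	users;
	wait_queue_head_t	drain_wq;
};

static int example_gate_init(struct example_usage_gate *g)
{
	int ret = percpu_counter_init(&g->users, 0);

	if (ret)
		return ret;
	init_waitqueue_head(&g->drain_wq);
	return 0;
}

static void example_gate_enter(struct example_usage_gate *g)
{
	percpu_counter_inc(&g->users);		/* hot path, no shared lock */
}

static void example_gate_exit(struct example_usage_gate *g)
{
	percpu_counter_dec(&g->users);
	wake_up_all(&g->drain_wq);
}

static void example_gate_drain(struct example_usage_gate *g)
{
	wait_event(g->drain_wq, percpu_counter_sum(&g->users) == 0);
}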
631/**
632 * blk_init_queue - prepare a request queue for use with a block device

--- 442 unchanged lines hidden ---

1075 ioc_set_batching(q, current->io_context);
1076
1077 spin_lock_irq(q->queue_lock);
1078 finish_wait(&rl->wait[is_sync], &wait);
1079
1080 goto retry;
1081}
1082
1111struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
1083static struct request *blk_old_get_request(struct request_queue *q, int rw,
1084 gfp_t gfp_mask)
1112{
1113 struct request *rq;
1114
1115 BUG_ON(rw != READ && rw != WRITE);
1116
1117 /* create ioc upfront */
1118 create_io_context(gfp_mask, q->node);
1119
1120 spin_lock_irq(q->queue_lock);
1121 rq = get_request(q, rw, NULL, gfp_mask);
1122 if (!rq)
1123 spin_unlock_irq(q->queue_lock);
1124 /* q->queue_lock is unlocked at this point */
1125
1126 return rq;
1127}
1085{
1086 struct request *rq;
1087
1088 BUG_ON(rw != READ && rw != WRITE);
1089
1090 /* create ioc upfront */
1091 create_io_context(gfp_mask, q->node);
1092
1093 spin_lock_irq(q->queue_lock);
1094 rq = get_request(q, rw, NULL, gfp_mask);
1095 if (!rq)
1096 spin_unlock_irq(q->queue_lock);
1097 /* q->queue_lock is unlocked at this point */
1098
1099 return rq;
1100}
1101
1102struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
1103{
1104 if (q->mq_ops)
1105 return blk_mq_alloc_request(q, rw, gfp_mask);
1106 else
1107 return blk_old_get_request(q, rw, gfp_mask);
1108}
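/*
 * Illustrative sketch, not part of either revision: with the q->mq_ops
 * dispatch above, a caller allocates and frees a request exactly as it
 * did before and transparently gets either the legacy or the blk-mq
 * path.  The function name below is hypothetical.
 */
static int example_send_one_request(struct request_queue *q)
{
	struct request *rq;

	rq = blk_get_request(q, WRITE, GFP_KERNEL);	/* legacy or blk-mq */
	if (!rq)
		return -ENOMEM;

	/* ... fill in cmd_type, cmd, data and end_io as usual ... */

	blk_put_request(rq);		/* also mq-aware, see below */
	return 0;
}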
1128EXPORT_SYMBOL(blk_get_request);
1129
1130/**
1131 * blk_make_request - given a bio, allocate a corresponding struct request.
1132 * @q: target request queue
1133 * @bio: The bio describing the memory mappings that will be submitted for IO.
1134 * It may be a chained-bio properly constructed by block/bio layer.
1135 * @gfp_mask: gfp flags to be used for memory allocation

--- 69 unchanged lines hidden ---

1205
1206 elv_requeue_request(q, rq);
1207}
1208EXPORT_SYMBOL(blk_requeue_request);
1209
1210static void add_acct_request(struct request_queue *q, struct request *rq,
1211 int where)
1212{
1109EXPORT_SYMBOL(blk_get_request);
1110
1111/**
1112 * blk_make_request - given a bio, allocate a corresponding struct request.
1113 * @q: target request queue
1114 * @bio: The bio describing the memory mappings that will be submitted for IO.
1115 * It may be a chained-bio properly constructed by block/bio layer.
1116 * @gfp_mask: gfp flags to be used for memory allocation

--- 69 unchanged lines hidden ---

1186
1187 elv_requeue_request(q, rq);
1188}
1189EXPORT_SYMBOL(blk_requeue_request);
1190
1191static void add_acct_request(struct request_queue *q, struct request *rq,
1192 int where)
1193{
1213 drive_stat_acct(rq, 1);
1194 blk_account_io_start(rq, true);
1214 __elv_add_request(q, rq, where);
1215}
1216
1217static void part_round_stats_single(int cpu, struct hd_struct *part,
1218 unsigned long now)
1219{
1220 if (now == part->stamp)
1221 return;

--- 72 unchanged lines hidden ---

1294 freed_request(rl, flags);
1295 blk_put_rl(rl);
1296 }
1297}
1298EXPORT_SYMBOL_GPL(__blk_put_request);
1299
1300void blk_put_request(struct request *req)
1301{
1195 __elv_add_request(q, rq, where);
1196}
1197
1198static void part_round_stats_single(int cpu, struct hd_struct *part,
1199 unsigned long now)
1200{
1201 if (now == part->stamp)
1202 return;

--- 72 unchanged lines hidden ---

1275 freed_request(rl, flags);
1276 blk_put_rl(rl);
1277 }
1278}
1279EXPORT_SYMBOL_GPL(__blk_put_request);
1280
1281void blk_put_request(struct request *req)
1282{
1302 unsigned long flags;
1303 struct request_queue *q = req->q;
1304
1283 struct request_queue *q = req->q;
1284
1305 spin_lock_irqsave(q->queue_lock, flags);
1306 __blk_put_request(q, req);
1307 spin_unlock_irqrestore(q->queue_lock, flags);
1285 if (q->mq_ops)
1286 blk_mq_free_request(req);
1287 else {
1288 unsigned long flags;
1289
1290 spin_lock_irqsave(q->queue_lock, flags);
1291 __blk_put_request(q, req);
1292 spin_unlock_irqrestore(q->queue_lock, flags);
1293 }
1308}
1309EXPORT_SYMBOL(blk_put_request);
1310
1311/**
1312 * blk_add_request_payload - add a payload to a request
1313 * @rq: request to update
1314 * @page: page backing the payload
1315 * @len: length of the payload.

--- 19 unchanged lines hidden ---

1335 bio->bi_phys_segments = 1;
1336
1337 rq->__data_len = rq->resid_len = len;
1338 rq->nr_phys_segments = 1;
1339 rq->buffer = bio_data(bio);
1340}
1341EXPORT_SYMBOL_GPL(blk_add_request_payload);
1342
1294}
1295EXPORT_SYMBOL(blk_put_request);
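/*
 * Not part of either revision: release now mirrors allocation; an mq
 * request is handed straight to blk_mq_free_request() and never takes
 * q->queue_lock, while the legacy path still does.
 */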
1296
1297/**
1298 * blk_add_request_payload - add a payload to a request
1299 * @rq: request to update
1300 * @page: page backing the payload
1301 * @len: length of the payload.

--- 19 unchanged lines hidden ---

1321 bio->bi_phys_segments = 1;
1322
1323 rq->__data_len = rq->resid_len = len;
1324 rq->nr_phys_segments = 1;
1325 rq->buffer = bio_data(bio);
1326}
1327EXPORT_SYMBOL_GPL(blk_add_request_payload);
1328
1343static bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
1344 struct bio *bio)
1329bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
1330 struct bio *bio)
1345{
1346 const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
1347
1348 if (!ll_back_merge_fn(q, req, bio))
1349 return false;
1350
1351 trace_block_bio_backmerge(q, req, bio);
1352
1353 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1354 blk_rq_set_mixed_merge(req);
1355
1356 req->biotail->bi_next = bio;
1357 req->biotail = bio;
1358 req->__data_len += bio->bi_size;
1359 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1360
1331{
1332 const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
1333
1334 if (!ll_back_merge_fn(q, req, bio))
1335 return false;
1336
1337 trace_block_bio_backmerge(q, req, bio);
1338
1339 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1340 blk_rq_set_mixed_merge(req);
1341
1342 req->biotail->bi_next = bio;
1343 req->biotail = bio;
1344 req->__data_len += bio->bi_size;
1345 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1346
1361 drive_stat_acct(req, 0);
1347 blk_account_io_start(req, false);
1362 return true;
1363}
1364
1348 return true;
1349}
1350
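/*
 * Not part of either revision: bio_attempt_back_merge() above, plus
 * bio_attempt_front_merge() and attempt_plug_merge() (renamed
 * blk_attempt_plug_merge()) below, all lose their static qualifier in
 * the newer tree, presumably so the blk-mq submission path can reuse
 * the same bio-to-request merging helpers.
 */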
1365static bool bio_attempt_front_merge(struct request_queue *q,
1366 struct request *req, struct bio *bio)
1351bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
1352 struct bio *bio)
1367{
1368 const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
1369
1370 if (!ll_front_merge_fn(q, req, bio))
1371 return false;
1372
1373 trace_block_bio_frontmerge(q, req, bio);
1374

--- 8 unchanged lines hidden ---

1383 * it didn't need a bounce buffer then it better
1384 * not touch req->buffer either...
1385 */
1386 req->buffer = bio_data(bio);
1387 req->__sector = bio->bi_sector;
1388 req->__data_len += bio->bi_size;
1389 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1390
1353{
1354 const int ff = bio->bi_rw & REQ_FAILFAST_MASK;
1355
1356 if (!ll_front_merge_fn(q, req, bio))
1357 return false;
1358
1359 trace_block_bio_frontmerge(q, req, bio);
1360

--- 8 unchanged lines hidden ---

1369 * it didn't need a bounce buffer then it better
1370 * not touch req->buffer either...
1371 */
1372 req->buffer = bio_data(bio);
1373 req->__sector = bio->bi_sector;
1374 req->__data_len += bio->bi_size;
1375 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1376
1391 drive_stat_acct(req, 0);
1377 blk_account_io_start(req, false);
1392 return true;
1393}
1394
1395/**
1378 return true;
1379}
1380
1381/**
1396 * attempt_plug_merge - try to merge with %current's plugged list
1382 * blk_attempt_plug_merge - try to merge with %current's plugged list
1397 * @q: request_queue new bio is being queued at
1398 * @bio: new bio being queued
1399 * @request_count: out parameter for number of traversed plugged requests
1400 *
1401 * Determine whether @bio being queued on @q can be merged with a request
1402 * on %current's plugged list. Returns %true if merge was successful,
1403 * otherwise %false.
1404 *
1405 * Plugging coalesces IOs from the same issuer for the same purpose without
1406 * going through @q->queue_lock. As such it's more of an issuing mechanism
1407 * than scheduling, and the request, while may have elvpriv data, is not
1408 * added on the elevator at this point. In addition, we don't have
1409 * reliable access to the elevator outside queue lock. Only check basic
1410 * merging parameters without querying the elevator.
1411 */
1383 * @q: request_queue new bio is being queued at
1384 * @bio: new bio being queued
1385 * @request_count: out parameter for number of traversed plugged requests
1386 *
1387 * Determine whether @bio being queued on @q can be merged with a request
1388 * on %current's plugged list. Returns %true if merge was successful,
1389 * otherwise %false.
1390 *
1391 * Plugging coalesces IOs from the same issuer for the same purpose without
1392 * going through @q->queue_lock. As such it's more of an issuing mechanism
1393 * than scheduling, and the request, while may have elvpriv data, is not
1394 * added on the elevator at this point. In addition, we don't have
1395 * reliable access to the elevator outside queue lock. Only check basic
1396 * merging parameters without querying the elevator.
1397 */
1412static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
1413 unsigned int *request_count)
1398bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
1399 unsigned int *request_count)
1414{
1415 struct blk_plug *plug;
1416 struct request *rq;
1417 bool ret = false;
1418
1419 plug = current->plug;
1420 if (!plug)
1421 goto out;

--- 62 unchanged lines hidden ---

1484 where = ELEVATOR_INSERT_FLUSH;
1485 goto get_rq;
1486 }
1487
1488 /*
1489 * Check if we can merge with the plugged list before grabbing
1490 * any locks.
1491 */
1400{
1401 struct blk_plug *plug;
1402 struct request *rq;
1403 bool ret = false;
1404
1405 plug = current->plug;
1406 if (!plug)
1407 goto out;

--- 62 unchanged lines hidden ---

1470 where = ELEVATOR_INSERT_FLUSH;
1471 goto get_rq;
1472 }
1473
1474 /*
1475 * Check if we can merge with the plugged list before grabbing
1476 * any locks.
1477 */
1492 if (attempt_plug_merge(q, bio, &request_count))
1478 if (blk_attempt_plug_merge(q, bio, &request_count))
1493 return;
1494
1495 spin_lock_irq(q->queue_lock);
1496
1497 el_ret = elv_merge(q, &req, bio);
1498 if (el_ret == ELEVATOR_BACK_MERGE) {
1499 if (bio_attempt_back_merge(q, req, bio)) {
1500 elv_bio_merged(q, req, bio);

--- 51 unchanged lines hidden ---

1552 trace_block_plug(q);
1553 else {
1554 if (request_count >= BLK_MAX_REQUEST_COUNT) {
1555 blk_flush_plug_list(plug, false);
1556 trace_block_plug(q);
1557 }
1558 }
1559 list_add_tail(&req->queuelist, &plug->list);
1479 return;
1480
1481 spin_lock_irq(q->queue_lock);
1482
1483 el_ret = elv_merge(q, &req, bio);
1484 if (el_ret == ELEVATOR_BACK_MERGE) {
1485 if (bio_attempt_back_merge(q, req, bio)) {
1486 elv_bio_merged(q, req, bio);

--- 51 unchanged lines hidden ---

1538 trace_block_plug(q);
1539 else {
1540 if (request_count >= BLK_MAX_REQUEST_COUNT) {
1541 blk_flush_plug_list(plug, false);
1542 trace_block_plug(q);
1543 }
1544 }
1545 list_add_tail(&req->queuelist, &plug->list);
1560 drive_stat_acct(req, 1);
1546 blk_account_io_start(req, true);
1561 } else {
1562 spin_lock_irq(q->queue_lock);
1563 add_acct_request(q, req, where);
1564 __blk_run_queue(q);
1565out_unlock:
1566 spin_unlock_irq(q->queue_lock);
1567 }
1568}

--- 437 unchanged lines hidden ---

2006 }
2007
2008 /* this could lead to infinite loop */
2009 BUG_ON(blk_rq_bytes(rq) && !bytes);
2010 return bytes;
2011}
2012EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
2013
1547 } else {
1548 spin_lock_irq(q->queue_lock);
1549 add_acct_request(q, req, where);
1550 __blk_run_queue(q);
1551out_unlock:
1552 spin_unlock_irq(q->queue_lock);
1553 }
1554}

--- 437 unchanged lines hidden ---

1992 }
1993
1994 /* this could lead to infinite loop */
1995 BUG_ON(blk_rq_bytes(rq) && !bytes);
1996 return bytes;
1997}
1998EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
1999
2014static void blk_account_io_completion(struct request *req, unsigned int bytes)
2000void blk_account_io_completion(struct request *req, unsigned int bytes)
2015{
2016 if (blk_do_io_stat(req)) {
2017 const int rw = rq_data_dir(req);
2018 struct hd_struct *part;
2019 int cpu;
2020
2021 cpu = part_stat_lock();
2022 part = req->part;
2023 part_stat_add(cpu, part, sectors[rw], bytes >> 9);
2024 part_stat_unlock();
2025 }
2026}
2027
2001{
2002 if (blk_do_io_stat(req)) {
2003 const int rw = rq_data_dir(req);
2004 struct hd_struct *part;
2005 int cpu;
2006
2007 cpu = part_stat_lock();
2008 part = req->part;
2009 part_stat_add(cpu, part, sectors[rw], bytes >> 9);
2010 part_stat_unlock();
2011 }
2012}
2013
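/*
 * Not part of either revision: blk_account_io_completion() here and
 * blk_account_io_done() below also drop static in the newer tree,
 * matching the new non-static blk_account_io_start() further down,
 * presumably so the blk-mq path can share the legacy I/O accounting.
 */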
2028static void blk_account_io_done(struct request *req)
2014void blk_account_io_done(struct request *req)
2029{
2030 /*
2031 * Account IO completion. flush_rq isn't accounted as a
2032 * normal IO on queueing nor completion. Accounting the
2033 * containing request is enough.
2034 */
2035 if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) {
2036 unsigned long duration = jiffies - req->start_time;

--- 31 unchanged lines hidden ---

2068#else
2069static inline struct request *blk_pm_peek_request(struct request_queue *q,
2070 struct request *rq)
2071{
2072 return rq;
2073}
2074#endif
2075
2015{
2016 /*
2017 * Account IO completion. flush_rq isn't accounted as a
2018 * normal IO on queueing nor completion. Accounting the
2019 * containing request is enough.
2020 */
2021 if (blk_do_io_stat(req) && !(req->cmd_flags & REQ_FLUSH_SEQ)) {
2022 unsigned long duration = jiffies - req->start_time;

--- 31 unchanged lines hidden ---

2054#else
2055static inline struct request *blk_pm_peek_request(struct request_queue *q,
2056 struct request *rq)
2057{
2058 return rq;
2059}
2060#endif
2061
2062void blk_account_io_start(struct request *rq, bool new_io)
2063{
2064 struct hd_struct *part;
2065 int rw = rq_data_dir(rq);
2066 int cpu;
2067
2068 if (!blk_do_io_stat(rq))
2069 return;
2070
2071 cpu = part_stat_lock();
2072
2073 if (!new_io) {
2074 part = rq->part;
2075 part_stat_inc(cpu, part, merges[rw]);
2076 } else {
2077 part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
2078 if (!hd_struct_try_get(part)) {
2079 /*
2080 * The partition is already being removed,
2081 * the request will be accounted on the disk only
2082 *
2083 * We take a reference on disk->part0 although that
2084 * partition will never be deleted, so we can treat
2085 * it as any other partition.
2086 */
2087 part = &rq->rq_disk->part0;
2088 hd_struct_get(part);
2089 }
2090 part_round_stats(cpu, part);
2091 part_inc_in_flight(part, rw);
2092 rq->part = part;
2093 }
2094
2095 part_stat_unlock();
2096}
2097
2076/**
2077 * blk_peek_request - peek at the top of a request queue
2078 * @q: request queue to peek at
2079 *
2080 * Description:
2081 * Return the request at the top of @q. The returned request
2082 * should be started using blk_start_request() before LLD starts
2083 * processing it.

--- 359 unchanged lines hidden ---

2443 if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
2444 laptop_io_completion(&req->q->backing_dev_info);
2445
2446 blk_delete_timer(req);
2447
2448 if (req->cmd_flags & REQ_DONTPREP)
2449 blk_unprep_request(req);
2450
2098/**
2099 * blk_peek_request - peek at the top of a request queue
2100 * @q: request queue to peek at
2101 *
2102 * Description:
2103 * Return the request at the top of @q. The returned request
2104 * should be started using blk_start_request() before LLD starts
2105 * processing it.

--- 359 unchanged lines hidden ---

2465 if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS)
2466 laptop_io_completion(&req->q->backing_dev_info);
2467
2468 blk_delete_timer(req);
2469
2470 if (req->cmd_flags & REQ_DONTPREP)
2471 blk_unprep_request(req);
2472
2451
2452 blk_account_io_done(req);
2453
2454 if (req->end_io)
2455 req->end_io(req, error);
2456 else {
2457 if (blk_bidi_rq(req))
2458 __blk_put_request(req->next_rq->q, req->next_rq);
2459

--- 405 unchanged lines hidden ---

2865 * this kind of deadlock.
2866 */
2867void blk_start_plug(struct blk_plug *plug)
2868{
2869 struct task_struct *tsk = current;
2870
2871 plug->magic = PLUG_MAGIC;
2872 INIT_LIST_HEAD(&plug->list);
2473 blk_account_io_done(req);
2474
2475 if (req->end_io)
2476 req->end_io(req, error);
2477 else {
2478 if (blk_bidi_rq(req))
2479 __blk_put_request(req->next_rq->q, req->next_rq);
2480

--- 405 unchanged lines hidden ---

2886 * this kind of deadlock.
2887 */
2888void blk_start_plug(struct blk_plug *plug)
2889{
2890 struct task_struct *tsk = current;
2891
2892 plug->magic = PLUG_MAGIC;
2893 INIT_LIST_HEAD(&plug->list);
2894 INIT_LIST_HEAD(&plug->mq_list);
2873 INIT_LIST_HEAD(&plug->cb_list);
2874
2875 /*
2876 * If this is a nested plug, don't actually assign it. It will be
2877 * flushed on its own.
2878 */
2879 if (!tsk->plug) {
2880 /*

--- 81 unchanged lines hidden ---

2962 unsigned long flags;
2963 struct request *rq;
2964 LIST_HEAD(list);
2965 unsigned int depth;
2966
2967 BUG_ON(plug->magic != PLUG_MAGIC);
2968
2969 flush_plug_callbacks(plug, from_schedule);
2895 INIT_LIST_HEAD(&plug->cb_list);
2896
2897 /*
2898 * If this is a nested plug, don't actually assign it. It will be
2899 * flushed on its own.
2900 */
2901 if (!tsk->plug) {
2902 /*

--- 81 unchanged lines hidden ---

2984 unsigned long flags;
2985 struct request *rq;
2986 LIST_HEAD(list);
2987 unsigned int depth;
2988
2989 BUG_ON(plug->magic != PLUG_MAGIC);
2990
2991 flush_plug_callbacks(plug, from_schedule);
2992
2993 if (!list_empty(&plug->mq_list))
2994 blk_mq_flush_plug_list(plug, from_schedule);
2995
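/*
 * Illustrative sketch, not part of either revision: the plug now carries
 * two lists; ->mq_list is handed to blk_mq_flush_plug_list() just above,
 * while the legacy ->list is sorted and drained below.  The submitter
 * side of the plugging API is unchanged; the function below is
 * hypothetical.
 */
static void example_submit_batch(int rw, struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* batch this task's submissions */
	for (i = 0; i < nr; i++)
		submit_bio(rw, bios[i]);
	blk_finish_plug(&plug);		/* ends up in blk_flush_plug_list() */
}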
2970 if (list_empty(&plug->list))
2971 return;
2972
2973 list_splice_init(&plug->list, &list);
2974
2975 list_sort(NULL, &list, plug_rq_cmp);
2976
2977 q = NULL;

--- 222 unchanged lines hidden ---
2996 if (list_empty(&plug->list))
2997 return;
2998
2999 list_splice_init(&plug->list, &list);
3000
3001 list_sort(NULL, &list, plug_rq_cmp);
3002
3003 q = NULL;

--- 222 unchanged lines hidden ---