xref: /linux/block/blk-lib.c (revision f31e7e4022841c43c53b847b86b1bf97a08b2c94)
1*f31e7e40SDmitry Monakhov /*
2*f31e7e40SDmitry Monakhov  * Functions related to generic helper functions
3*f31e7e40SDmitry Monakhov  */
4*f31e7e40SDmitry Monakhov #include <linux/kernel.h>
5*f31e7e40SDmitry Monakhov #include <linux/module.h>
6*f31e7e40SDmitry Monakhov #include <linux/bio.h>
7*f31e7e40SDmitry Monakhov #include <linux/blkdev.h>
8*f31e7e40SDmitry Monakhov #include <linux/scatterlist.h>
9*f31e7e40SDmitry Monakhov 
10*f31e7e40SDmitry Monakhov #include "blk.h"
11*f31e7e40SDmitry Monakhov 
12*f31e7e40SDmitry Monakhov static void blkdev_discard_end_io(struct bio *bio, int err)
13*f31e7e40SDmitry Monakhov {
14*f31e7e40SDmitry Monakhov 	if (err) {
15*f31e7e40SDmitry Monakhov 		if (err == -EOPNOTSUPP)
16*f31e7e40SDmitry Monakhov 			set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
17*f31e7e40SDmitry Monakhov 		clear_bit(BIO_UPTODATE, &bio->bi_flags);
18*f31e7e40SDmitry Monakhov 	}
19*f31e7e40SDmitry Monakhov 
20*f31e7e40SDmitry Monakhov 	if (bio->bi_private)
21*f31e7e40SDmitry Monakhov 		complete(bio->bi_private);
22*f31e7e40SDmitry Monakhov 	__free_page(bio_page(bio));
23*f31e7e40SDmitry Monakhov 
24*f31e7e40SDmitry Monakhov 	bio_put(bio);
25*f31e7e40SDmitry Monakhov }
26*f31e7e40SDmitry Monakhov 
27*f31e7e40SDmitry Monakhov /**
28*f31e7e40SDmitry Monakhov  * blkdev_issue_discard - queue a discard
29*f31e7e40SDmitry Monakhov  * @bdev:	blockdev to issue discard for
30*f31e7e40SDmitry Monakhov  * @sector:	start sector
31*f31e7e40SDmitry Monakhov  * @nr_sects:	number of sectors to discard
32*f31e7e40SDmitry Monakhov  * @gfp_mask:	memory allocation flags (for bio_alloc)
33*f31e7e40SDmitry Monakhov  * @flags:	BLKDEV_IFL_* flags to control behaviour
34*f31e7e40SDmitry Monakhov  *
35*f31e7e40SDmitry Monakhov  * Description:
36*f31e7e40SDmitry Monakhov  *    Issue a discard request for the sectors in question.
37*f31e7e40SDmitry Monakhov  */
38*f31e7e40SDmitry Monakhov int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
39*f31e7e40SDmitry Monakhov 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
40*f31e7e40SDmitry Monakhov {
41*f31e7e40SDmitry Monakhov 	DECLARE_COMPLETION_ONSTACK(wait);
42*f31e7e40SDmitry Monakhov 	struct request_queue *q = bdev_get_queue(bdev);
43*f31e7e40SDmitry Monakhov 	int type = flags & BLKDEV_IFL_BARRIER ?
44*f31e7e40SDmitry Monakhov 		DISCARD_BARRIER : DISCARD_NOBARRIER;
45*f31e7e40SDmitry Monakhov 	struct bio *bio;
46*f31e7e40SDmitry Monakhov 	struct page *page;
47*f31e7e40SDmitry Monakhov 	int ret = 0;
48*f31e7e40SDmitry Monakhov 
49*f31e7e40SDmitry Monakhov 	if (!q)
50*f31e7e40SDmitry Monakhov 		return -ENXIO;
51*f31e7e40SDmitry Monakhov 
52*f31e7e40SDmitry Monakhov 	if (!blk_queue_discard(q))
53*f31e7e40SDmitry Monakhov 		return -EOPNOTSUPP;
54*f31e7e40SDmitry Monakhov 
55*f31e7e40SDmitry Monakhov 	while (nr_sects && !ret) {
56*f31e7e40SDmitry Monakhov 		unsigned int sector_size = q->limits.logical_block_size;
57*f31e7e40SDmitry Monakhov 		unsigned int max_discard_sectors =
58*f31e7e40SDmitry Monakhov 			min(q->limits.max_discard_sectors, UINT_MAX >> 9);
59*f31e7e40SDmitry Monakhov 
60*f31e7e40SDmitry Monakhov 		bio = bio_alloc(gfp_mask, 1);
61*f31e7e40SDmitry Monakhov 		if (!bio)
62*f31e7e40SDmitry Monakhov 			goto out;
63*f31e7e40SDmitry Monakhov 		bio->bi_sector = sector;
64*f31e7e40SDmitry Monakhov 		bio->bi_end_io = blkdev_discard_end_io;
65*f31e7e40SDmitry Monakhov 		bio->bi_bdev = bdev;
66*f31e7e40SDmitry Monakhov 		if (flags & BLKDEV_IFL_WAIT)
67*f31e7e40SDmitry Monakhov 			bio->bi_private = &wait;
68*f31e7e40SDmitry Monakhov 
69*f31e7e40SDmitry Monakhov 		/*
70*f31e7e40SDmitry Monakhov 		 * Add a zeroed one-sector payload as that's what
71*f31e7e40SDmitry Monakhov 		 * our current implementations need.  If we'll ever need
72*f31e7e40SDmitry Monakhov 		 * more the interface will need revisiting.
73*f31e7e40SDmitry Monakhov 		 */
74*f31e7e40SDmitry Monakhov 		page = alloc_page(gfp_mask | __GFP_ZERO);
75*f31e7e40SDmitry Monakhov 		if (!page)
76*f31e7e40SDmitry Monakhov 			goto out_free_bio;
77*f31e7e40SDmitry Monakhov 		if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size)
78*f31e7e40SDmitry Monakhov 			goto out_free_page;
79*f31e7e40SDmitry Monakhov 
80*f31e7e40SDmitry Monakhov 		/*
81*f31e7e40SDmitry Monakhov 		 * And override the bio size - the way discard works we
82*f31e7e40SDmitry Monakhov 		 * touch many more blocks on disk than the actual payload
83*f31e7e40SDmitry Monakhov 		 * length.
84*f31e7e40SDmitry Monakhov 		 */
85*f31e7e40SDmitry Monakhov 		if (nr_sects > max_discard_sectors) {
86*f31e7e40SDmitry Monakhov 			bio->bi_size = max_discard_sectors << 9;
87*f31e7e40SDmitry Monakhov 			nr_sects -= max_discard_sectors;
88*f31e7e40SDmitry Monakhov 			sector += max_discard_sectors;
89*f31e7e40SDmitry Monakhov 		} else {
90*f31e7e40SDmitry Monakhov 			bio->bi_size = nr_sects << 9;
91*f31e7e40SDmitry Monakhov 			nr_sects = 0;
92*f31e7e40SDmitry Monakhov 		}
93*f31e7e40SDmitry Monakhov 
94*f31e7e40SDmitry Monakhov 		bio_get(bio);
95*f31e7e40SDmitry Monakhov 		submit_bio(type, bio);
96*f31e7e40SDmitry Monakhov 
97*f31e7e40SDmitry Monakhov 		if (flags & BLKDEV_IFL_WAIT)
98*f31e7e40SDmitry Monakhov 			wait_for_completion(&wait);
99*f31e7e40SDmitry Monakhov 
100*f31e7e40SDmitry Monakhov 		if (bio_flagged(bio, BIO_EOPNOTSUPP))
101*f31e7e40SDmitry Monakhov 			ret = -EOPNOTSUPP;
102*f31e7e40SDmitry Monakhov 		else if (!bio_flagged(bio, BIO_UPTODATE))
103*f31e7e40SDmitry Monakhov 			ret = -EIO;
104*f31e7e40SDmitry Monakhov 		bio_put(bio);
105*f31e7e40SDmitry Monakhov 	}
106*f31e7e40SDmitry Monakhov 	return ret;
107*f31e7e40SDmitry Monakhov out_free_page:
108*f31e7e40SDmitry Monakhov 	__free_page(page);
109*f31e7e40SDmitry Monakhov out_free_bio:
110*f31e7e40SDmitry Monakhov 	bio_put(bio);
111*f31e7e40SDmitry Monakhov out:
112*f31e7e40SDmitry Monakhov 	return -ENOMEM;
113*f31e7e40SDmitry Monakhov }
114*f31e7e40SDmitry Monakhov EXPORT_SYMBOL(blkdev_issue_discard);
115