// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/blkdev.h>

#include "../dm-core.h"
#include "pcache_internal.h"
#include "cache_dev.h"
#include "backing_dev.h"
#include "cache.h"
#include "dm_pcache.h"

static struct kmem_cache *backing_req_cache;
static struct kmem_cache *backing_bvec_cache;

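/* Tear down the request and bvec mempools created by backing_dev_init(). */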
static void backing_dev_exit(struct pcache_backing_dev *backing_dev)
{
	mempool_exit(&backing_dev->req_pool);
	mempool_exit(&backing_dev->bvec_pool);
}

static void req_submit_fn(struct work_struct *work);
static void req_complete_fn(struct work_struct *work);
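
/*
 * Set up the mempools, request lists, locks, work items, and inflight
 * accounting used to drive I/O against the backing device.
 */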
static int backing_dev_init(struct dm_pcache *pcache)
{
	struct pcache_backing_dev *backing_dev = &pcache->backing_dev;
	int ret;

	ret = mempool_init_slab_pool(&backing_dev->req_pool, 128, backing_req_cache);
	if (ret)
		goto err;

	ret = mempool_init_slab_pool(&backing_dev->bvec_pool, 128, backing_bvec_cache);
	if (ret)
		goto req_pool_exit;

	INIT_LIST_HEAD(&backing_dev->submit_list);
	INIT_LIST_HEAD(&backing_dev->complete_list);
	spin_lock_init(&backing_dev->submit_lock);
	spin_lock_init(&backing_dev->complete_lock);
	INIT_WORK(&backing_dev->req_submit_work, req_submit_fn);
	INIT_WORK(&backing_dev->req_complete_work, req_complete_fn);
	atomic_set(&backing_dev->inflight_reqs, 0);
	init_waitqueue_head(&backing_dev->inflight_wq);

	return 0;

req_pool_exit:
	mempool_exit(&backing_dev->req_pool);
err:
	return ret;
}

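/*
 * Called when the pcache target starts: set up the request machinery
 * and record the size of the backing device in sectors.
 */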
int backing_dev_start(struct dm_pcache *pcache)
{
	struct pcache_backing_dev *backing_dev = &pcache->backing_dev;
	int ret;

	ret = backing_dev_init(pcache);
	if (ret)
		return ret;

	backing_dev->dev_size = bdev_nr_sectors(backing_dev->dm_dev->bdev);

	return 0;
}

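/*
 * Called on teardown: drain all inflight requests, flush the submit and
 * complete work items, then release the mempools.
 */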
void backing_dev_stop(struct dm_pcache *pcache)
{
	struct pcache_backing_dev *backing_dev = &pcache->backing_dev;

	/*
	 * No new requests should be arriving at this point; just wait
	 * for the inflight requests to finish.
	 */
	wait_event(backing_dev->inflight_wq,
			atomic_read(&backing_dev->inflight_reqs) == 0);

	flush_work(&backing_dev->req_submit_work);
	flush_work(&backing_dev->req_complete_work);

	backing_dev_exit(backing_dev);
}

/* pcache_backing_dev_req functions */
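
/*
 * Complete a backing_dev request: invoke the caller's end_req callback,
 * release per-type resources (the reference on the upper pcache request,
 * or a pooled bvec array), free the request itself, and wake up anyone
 * waiting in backing_dev_stop() once the inflight count drops to zero.
 */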
void backing_dev_req_end(struct pcache_backing_dev_req *backing_req)
{
	struct pcache_backing_dev *backing_dev = backing_req->backing_dev;

	if (backing_req->end_req)
		backing_req->end_req(backing_req, backing_req->ret);

	switch (backing_req->type) {
	case BACKING_DEV_REQ_TYPE_REQ:
		if (backing_req->req.upper_req)
			pcache_req_put(backing_req->req.upper_req, backing_req->ret);
		break;
	case BACKING_DEV_REQ_TYPE_KMEM:
		if (backing_req->kmem.bvecs != backing_req->kmem.inline_bvecs)
			mempool_free(backing_req->kmem.bvecs, &backing_dev->bvec_pool);
		break;
	default:
		BUG();
	}

	mempool_free(backing_req, &backing_dev->req_pool);

	if (atomic_dec_and_test(&backing_dev->inflight_reqs))
		wake_up(&backing_dev->inflight_wq);
}

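/*
 * Work item that drains the complete_list and finishes each request in
 * process context, keeping the bio end_io handler short.
 */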
static void req_complete_fn(struct work_struct *work)
{
	struct pcache_backing_dev *backing_dev = container_of(work, struct pcache_backing_dev, req_complete_work);
	struct pcache_backing_dev_req *backing_req;
	LIST_HEAD(tmp_list);

	spin_lock_irq(&backing_dev->complete_lock);
	list_splice_init(&backing_dev->complete_list, &tmp_list);
	spin_unlock_irq(&backing_dev->complete_lock);

	while (!list_empty(&tmp_list)) {
		backing_req = list_first_entry(&tmp_list,
					    struct pcache_backing_dev_req, node);
		list_del_init(&backing_req->node);
		backing_dev_req_end(backing_req);
	}
}

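/*
 * bio end_io callback; may run in hard-irq context, hence the irqsave
 * locking. Records the bio status on the request and defers the rest of
 * the completion to req_complete_fn() via the pcache workqueue.
 */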
static void backing_dev_bio_end(struct bio *bio)
{
	struct pcache_backing_dev_req *backing_req = bio->bi_private;
	struct pcache_backing_dev *backing_dev = backing_req->backing_dev;
	unsigned long flags;

	backing_req->ret = blk_status_to_errno(bio->bi_status);

	spin_lock_irqsave(&backing_dev->complete_lock, flags);
	list_move_tail(&backing_req->node, &backing_dev->complete_list);
	queue_work(BACKING_DEV_TO_PCACHE(backing_dev)->task_wq, &backing_dev->req_complete_work);
	spin_unlock_irqrestore(&backing_dev->complete_lock, flags);
}

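/*
 * Work item that drains the submit_list and issues the queued bios to
 * the backing block device.
 */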
static void req_submit_fn(struct work_struct *work)
{
	struct pcache_backing_dev *backing_dev = container_of(work, struct pcache_backing_dev, req_submit_work);
	struct pcache_backing_dev_req *backing_req;
	LIST_HEAD(tmp_list);

	spin_lock(&backing_dev->submit_lock);
	list_splice_init(&backing_dev->submit_list, &tmp_list);
	spin_unlock(&backing_dev->submit_lock);

	while (!list_empty(&tmp_list)) {
		backing_req = list_first_entry(&tmp_list,
					    struct pcache_backing_dev_req, node);
		list_del_init(&backing_req->node);
		submit_bio_noacct(&backing_req->bio);
	}
}

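/*
 * Submit a backing_dev request. With @direct set, the bio is issued from
 * the caller's context; otherwise it is queued on the submit_list and
 * issued later by req_submit_fn() on the pcache workqueue.
 */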
void backing_dev_req_submit(struct pcache_backing_dev_req *backing_req, bool direct)
{
	struct pcache_backing_dev *backing_dev = backing_req->backing_dev;

	if (direct) {
		submit_bio_noacct(&backing_req->bio);
		return;
	}

	spin_lock(&backing_dev->submit_lock);
	list_add_tail(&backing_req->node, &backing_dev->submit_list);
	queue_work(BACKING_DEV_TO_PCACHE(backing_dev)->task_wq, &backing_dev->req_submit_work);
	spin_unlock(&backing_dev->submit_lock);
}

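/*
 * Map a kernel buffer into @bio. A non-vmalloc (linear-mapped) buffer is
 * physically contiguous, so a single bio_add_page() covers the whole
 * range; a vmalloc buffer must be mapped page by page after flushing its
 * virtual mappings.
 */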
static void bio_map(struct bio *bio, void *base, size_t size)
{
	struct page *page;
	unsigned int offset;
	unsigned int len;

	if (!is_vmalloc_addr(base)) {
		page = virt_to_page(base);
		offset = offset_in_page(base);

		BUG_ON(!bio_add_page(bio, page, size, offset));
		return;
	}

	flush_kernel_vmap_range(base, size);
	while (size) {
		page = vmalloc_to_page(base);
		offset = offset_in_page(base);
		len = min_t(size_t, PAGE_SIZE - offset, size);

		BUG_ON(!bio_add_page(bio, page, len, offset));
		size -= len;
		base += len;
	}
}

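/*
 * Allocate a REQ-type request: its bio is initialized as a clone of the
 * upper (original) bio, sharing the original's bvecs.
 */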
static struct pcache_backing_dev_req *req_type_req_alloc(struct pcache_backing_dev *backing_dev,
							struct pcache_backing_dev_req_opts *opts)
{
	struct pcache_request *pcache_req = opts->req.upper_req;
	struct pcache_backing_dev_req *backing_req;
	struct bio *orig = pcache_req->bio;

	backing_req = mempool_alloc(&backing_dev->req_pool, opts->gfp_mask);
	if (!backing_req)
		return NULL;

	memset(backing_req, 0, sizeof(struct pcache_backing_dev_req));

	bio_init_clone(backing_dev->dm_dev->bdev, &backing_req->bio, orig, opts->gfp_mask);

	backing_req->type = BACKING_DEV_REQ_TYPE_REQ;
	backing_req->backing_dev = backing_dev;
	atomic_inc(&backing_dev->inflight_reqs);

	return backing_req;
}

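/*
 * Allocate a KMEM-type request for I/O against a kernel buffer. Small
 * transfers use the inline bvec array embedded in the request; larger
 * ones borrow a fixed-size array from the bvec mempool.
 */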
static struct pcache_backing_dev_req *kmem_type_req_alloc(struct pcache_backing_dev *backing_dev,
						struct pcache_backing_dev_req_opts *opts)
{
	struct pcache_backing_dev_req *backing_req;
	u32 n_vecs = bio_add_max_vecs(opts->kmem.data, opts->kmem.len);

	backing_req = mempool_alloc(&backing_dev->req_pool, opts->gfp_mask);
	if (!backing_req)
		return NULL;

	memset(backing_req, 0, sizeof(struct pcache_backing_dev_req));

	if (n_vecs > BACKING_DEV_REQ_INLINE_BVECS) {
		backing_req->kmem.bvecs = mempool_alloc(&backing_dev->bvec_pool, opts->gfp_mask);
		if (!backing_req->kmem.bvecs)
			goto free_backing_req;
	} else {
		backing_req->kmem.bvecs = backing_req->kmem.inline_bvecs;
	}

	backing_req->kmem.n_vecs = n_vecs;
	backing_req->type = BACKING_DEV_REQ_TYPE_KMEM;
	backing_req->backing_dev = backing_dev;
	atomic_inc(&backing_dev->inflight_reqs);

	return backing_req;

free_backing_req:
	mempool_free(backing_req, &backing_dev->req_pool);
	return NULL;
}

struct pcache_backing_dev_req *backing_dev_req_alloc(struct pcache_backing_dev *backing_dev,
						struct pcache_backing_dev_req_opts *opts)
{
	if (opts->type == BACKING_DEV_REQ_TYPE_REQ)
		return req_type_req_alloc(backing_dev, opts);

	if (opts->type == BACKING_DEV_REQ_TYPE_KMEM)
		return kmem_type_req_alloc(backing_dev, opts);

	BUG();
}

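/*
 * Initialize a REQ-type request: trim the cloned bio to the
 * [req_off, req_off + len) window of the upper bio, point it at the
 * corresponding offset on the backing device, and take a reference on
 * the upper request for the lifetime of this backing request.
 */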
static void req_type_req_init(struct pcache_backing_dev_req *backing_req,
			struct pcache_backing_dev_req_opts *opts)
{
	struct pcache_request *pcache_req = opts->req.upper_req;
	struct bio *clone;
	u32 off = opts->req.req_off;
	u32 len = opts->req.len;

	clone = &backing_req->bio;
	BUG_ON(off & SECTOR_MASK);
	BUG_ON(len & SECTOR_MASK);
	bio_trim(clone, off >> SECTOR_SHIFT, len >> SECTOR_SHIFT);

	clone->bi_iter.bi_sector = (pcache_req->off + off) >> SECTOR_SHIFT;
	clone->bi_private = backing_req;
	clone->bi_end_io = backing_dev_bio_end;

	INIT_LIST_HEAD(&backing_req->node);
	backing_req->end_req     = opts->end_fn;

	pcache_req_get(pcache_req);
	backing_req->req.upper_req	= pcache_req;
	backing_req->req.bio_off	= off;
}

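/*
 * Initialize a KMEM-type request: build a bio over the kernel buffer
 * described in @opts and point it at opts->kmem.backing_off on the
 * backing device.
 */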
static void kmem_type_req_init(struct pcache_backing_dev_req *backing_req,
			struct pcache_backing_dev_req_opts *opts)
{
	struct pcache_backing_dev *backing_dev = backing_req->backing_dev;
	struct bio *backing_bio;

	bio_init(&backing_req->bio, backing_dev->dm_dev->bdev, backing_req->kmem.bvecs,
			backing_req->kmem.n_vecs, opts->kmem.opf);

	backing_bio = &backing_req->bio;
	bio_map(backing_bio, opts->kmem.data, opts->kmem.len);

	backing_bio->bi_iter.bi_sector = (opts->kmem.backing_off) >> SECTOR_SHIFT;
	backing_bio->bi_private = backing_req;
	backing_bio->bi_end_io = backing_dev_bio_end;

	INIT_LIST_HEAD(&backing_req->node);
	backing_req->end_req	= opts->end_fn;
	backing_req->priv_data	= opts->priv_data;
}

void backing_dev_req_init(struct pcache_backing_dev_req *backing_req,
			struct pcache_backing_dev_req_opts *opts)
{
	if (opts->type == BACKING_DEV_REQ_TYPE_REQ)
		return req_type_req_init(backing_req, opts);

	if (opts->type == BACKING_DEV_REQ_TYPE_KMEM)
		return kmem_type_req_init(backing_req, opts);

	BUG();
}

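/*
 * Allocate and initialize a backing_dev request in one call.
 *
 * A minimal usage sketch for a KMEM-type write (my_end_fn, buf, len and
 * off are illustrative placeholders, not part of this file):
 *
 *	struct pcache_backing_dev_req_opts opts = {
 *		.type		= BACKING_DEV_REQ_TYPE_KMEM,
 *		.gfp_mask	= GFP_NOIO,
 *		.end_fn		= my_end_fn,
 *		.kmem		= {
 *			.data		= buf,
 *			.len		= len,
 *			.backing_off	= off,
 *			.opf		= REQ_OP_WRITE,
 *		},
 *	};
 *	struct pcache_backing_dev_req *backing_req;
 *
 *	backing_req = backing_dev_req_create(backing_dev, &opts);
 *	if (backing_req)
 *		backing_dev_req_submit(backing_req, false);
 */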
struct pcache_backing_dev_req *backing_dev_req_create(struct pcache_backing_dev *backing_dev,
						struct pcache_backing_dev_req_opts *opts)
{
	struct pcache_backing_dev_req *backing_req;

	backing_req = backing_dev_req_alloc(backing_dev, opts);
	if (!backing_req)
		return NULL;

	backing_dev_req_init(backing_req, opts);

	return backing_req;
}

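/* Flush the write cache of the backing block device. */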
void backing_dev_flush(struct pcache_backing_dev *backing_dev)
{
	blkdev_issue_flush(backing_dev->dm_dev->bdev);
}

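/*
 * Module-level init: create the slab caches that back the per-device
 * mempools. The bvec cache is sized to map a buffer of up to
 * PCACHE_CACHE_SUBTREE_SIZE, plus one extra vector in case the buffer
 * does not start on a page boundary.
 */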
int pcache_backing_init(void)
{
	u32 max_bvecs = (PCACHE_CACHE_SUBTREE_SIZE >> PAGE_SHIFT) + 1;
	int ret;

	backing_req_cache = KMEM_CACHE(pcache_backing_dev_req, 0);
	if (!backing_req_cache) {
		ret = -ENOMEM;
		goto err;
	}

	backing_bvec_cache = kmem_cache_create("pcache-bvec-slab",
					max_bvecs * sizeof(struct bio_vec),
					0, 0, NULL);
	if (!backing_bvec_cache) {
		ret = -ENOMEM;
		goto destroy_req_cache;
	}

	return 0;
destroy_req_cache:
	kmem_cache_destroy(backing_req_cache);
err:
	return ret;
}

void pcache_backing_exit(void)
{
	kmem_cache_destroy(backing_bvec_cache);
	kmem_cache_destroy(backing_req_cache);
}