// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/blk_types.h>

#include "cache.h"
#include "cache_dev.h"
#include "backing_dev.h"
#include "dm_pcache.h"

struct kmem_cache *key_cache;

static inline struct pcache_cache_info *get_cache_info_addr(struct pcache_cache *cache)
{
	return cache->cache_info_addr + cache->info_index;
}

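/*
 * Persist cache_info into the on-media slot selected by info_index.
 * Each write bumps the header sequence number, recomputes the header
 * CRC over the whole structure, flushes the copy with
 * memcpy_flushcache(), and then advances info_index through the
 * PCACHE_META_INDEX_MAX rotating slots. Rotating slots keeps an older
 * valid copy on media, which pcache_meta_find_latest() can presumably
 * fall back to if the newest write was torn.
 */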
static void cache_info_write(struct pcache_cache *cache)
{
	struct pcache_cache_info *cache_info = &cache->cache_info;

	cache_info->header.seq++;
	cache_info->header.crc = pcache_meta_crc(&cache_info->header,
						sizeof(struct pcache_cache_info));

	memcpy_flushcache(get_cache_info_addr(cache), cache_info,
			sizeof(struct pcache_cache_info));

	cache->info_index = (cache->info_index + 1) % PCACHE_META_INDEX_MAX;
}

static void cache_info_init_default(struct pcache_cache *cache);
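/*
 * Locate the most recent valid cache_info copy on the cache device.
 * If one exists, only verify that the requested data_crc option
 * matches the flag the cache was created with; if none exists, this
 * is a fresh cache, so set up defaults and apply the cache_mode and
 * data_crc options.
 */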
static int cache_info_init(struct pcache_cache *cache, struct pcache_cache_options *opts)
{
	struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
	struct pcache_cache_info *cache_info_addr;

	cache_info_addr = pcache_meta_find_latest(&cache->cache_info_addr->header,
						sizeof(struct pcache_cache_info),
						PCACHE_CACHE_INFO_SIZE,
						&cache->cache_info);
	if (IS_ERR(cache_info_addr))
		return PTR_ERR(cache_info_addr);

	if (cache_info_addr) {
		if (opts->data_crc !=
				(cache->cache_info.flags & PCACHE_CACHE_FLAGS_DATA_CRC)) {
			pcache_dev_err(pcache, "invalid option for data_crc: %s, expected: %s",
					opts->data_crc ? "true" : "false",
					cache->cache_info.flags & PCACHE_CACHE_FLAGS_DATA_CRC ? "true" : "false");
			return -EINVAL;
		}

		return 0;
	}

	/* init cache_info for new cache */
	cache_info_init_default(cache);
	cache_mode_set(cache, opts->cache_mode);
	if (opts->data_crc)
		cache->cache_info.flags |= PCACHE_CACHE_FLAGS_DATA_CRC;

	return 0;
}

static void cache_info_set_gc_percent(struct pcache_cache_info *cache_info, u8 percent)
{
	cache_info->flags &= ~PCACHE_CACHE_FLAGS_GC_PERCENT_MASK;
	cache_info->flags |= FIELD_PREP(PCACHE_CACHE_FLAGS_GC_PERCENT_MASK, percent);
}

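/*
 * Set the garbage-collection trigger percentage. The value is
 * range-checked against PCACHE_CACHE_GC_PERCENT_MIN/MAX and then
 * persisted immediately via cache_info_write() under cache_info_lock.
 */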
int pcache_cache_set_gc_percent(struct pcache_cache *cache, u8 percent)
{
	if (percent > PCACHE_CACHE_GC_PERCENT_MAX || percent < PCACHE_CACHE_GC_PERCENT_MIN)
		return -EINVAL;

	mutex_lock(&cache->cache_info_lock);
	cache_info_set_gc_percent(&cache->cache_info, percent);

	cache_info_write(cache);
	mutex_unlock(&cache->cache_info_lock);

	return 0;
}

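/*
 * Persist a cache position (segment id plus offset within the
 * segment) into one of the PCACHE_META_INDEX_MAX on-media slots
 * starting at pos_onmedia_base. The record is stamped with the
 * caller-provided sequence number and a CRC, flushed with
 * memcpy_flushcache(), and ordered with pmem_wmb() before *index
 * advances to the next slot.
 */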
void cache_pos_encode(struct pcache_cache *cache,
		      struct pcache_cache_pos_onmedia *pos_onmedia_base,
		      struct pcache_cache_pos *pos, u64 seq, u32 *index)
{
	struct pcache_cache_pos_onmedia pos_onmedia;
	struct pcache_cache_pos_onmedia *pos_onmedia_addr = pos_onmedia_base + *index;

	pos_onmedia.cache_seg_id = pos->cache_seg->cache_seg_id;
	pos_onmedia.seg_off = pos->seg_off;
	pos_onmedia.header.seq = seq;
	pos_onmedia.header.crc = cache_pos_onmedia_crc(&pos_onmedia);

	memcpy_flushcache(pos_onmedia_addr, &pos_onmedia, sizeof(struct pcache_cache_pos_onmedia));
	pmem_wmb();

	*index = (*index + 1) % PCACHE_META_INDEX_MAX;
}

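/*
 * Read back the latest valid position written by cache_pos_encode(),
 * returning the decoded position, its sequence number, and the slot
 * index it was found in. Returns -EIO when no valid copy exists,
 * since a position record should already be present for any
 * previously initialized cache.
 */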
int cache_pos_decode(struct pcache_cache *cache,
		     struct pcache_cache_pos_onmedia *pos_onmedia,
		     struct pcache_cache_pos *pos, u64 *seq, u32 *index)
{
	struct pcache_cache_pos_onmedia latest, *latest_addr;

	latest_addr = pcache_meta_find_latest(&pos_onmedia->header,
					sizeof(struct pcache_cache_pos_onmedia),
					sizeof(struct pcache_cache_pos_onmedia),
					&latest);
	if (IS_ERR(latest_addr))
		return PTR_ERR(latest_addr);

	if (!latest_addr)
		return -EIO;

	pos->cache_seg = &cache->segments[latest.cache_seg_id];
	pos->seg_off = latest.seg_off;
	*seq = latest.header.seq;
	*index = (latest_addr - pos_onmedia);

	return 0;
}

static inline void cache_info_set_seg_id(struct pcache_cache *cache, u32 seg_id)
{
	cache->cache_info.seg_id = seg_id;
}

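/*
 * Allocate the in-memory segment array and segment bitmap, sized by
 * the cache device's segment count, wire the cache up to its backing
 * and cache devices, and initialize the locks and the
 * writeback/gc/clean work items. No on-media state is touched here.
 */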
static int cache_init(struct dm_pcache *pcache)
{
	struct pcache_cache *cache = &pcache->cache;
	struct pcache_backing_dev *backing_dev = &pcache->backing_dev;
	struct pcache_cache_dev *cache_dev = &pcache->cache_dev;
	int ret;

	cache->segments = kvcalloc(cache_dev->seg_num, sizeof(struct pcache_cache_segment), GFP_KERNEL);
	if (!cache->segments) {
		ret = -ENOMEM;
		goto err;
	}

	cache->seg_map = kvcalloc(BITS_TO_LONGS(cache_dev->seg_num), sizeof(unsigned long), GFP_KERNEL);
	if (!cache->seg_map) {
		ret = -ENOMEM;
		goto free_segments;
	}

	cache->backing_dev = backing_dev;
	cache->cache_dev = &pcache->cache_dev;
	cache->n_segs = cache_dev->seg_num;
	atomic_set(&cache->gc_errors, 0);
	spin_lock_init(&cache->seg_map_lock);
	spin_lock_init(&cache->key_head_lock);

	mutex_init(&cache->cache_info_lock);
	mutex_init(&cache->key_tail_lock);
	mutex_init(&cache->dirty_tail_lock);
	mutex_init(&cache->writeback_lock);

	INIT_DELAYED_WORK(&cache->writeback_work, cache_writeback_fn);
	INIT_DELAYED_WORK(&cache->gc_work, pcache_cache_gc_fn);
	INIT_WORK(&cache->clean_work, clean_fn);

	return 0;

free_segments:
	kvfree(cache->segments);
err:
	return ret;
}

static void cache_exit(struct pcache_cache *cache)
{
	kvfree(cache->seg_map);
	kvfree(cache->segments);
}

static void cache_info_init_default(struct pcache_cache *cache)
{
	struct pcache_cache_info *cache_info = &cache->cache_info;

	cache_info->header.seq = 0;
	cache_info->n_segs = cache->cache_dev->seg_num;
	cache_info_set_gc_percent(cache_info, PCACHE_CACHE_GC_PERCENT_DEFAULT);
}

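/*
 * Initialize key_head, key_tail and dirty_tail. For a brand-new cache
 * (PCACHE_CACHE_FLAGS_INIT_DONE not set yet) all three start at
 * offset 0 of segment 0, which is marked busy in seg_map, and both
 * tails are persisted right away. For an existing cache the tails are
 * decoded from media instead, and corruption there is fatal (-EIO).
 */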
static int cache_tail_init(struct pcache_cache *cache)
{
	struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
	bool new_cache = !(cache->cache_info.flags & PCACHE_CACHE_FLAGS_INIT_DONE);

	if (new_cache) {
		__set_bit(0, cache->seg_map);

		cache->key_head.cache_seg = &cache->segments[0];
		cache->key_head.seg_off = 0;
		cache_pos_copy(&cache->key_tail, &cache->key_head);
		cache_pos_copy(&cache->dirty_tail, &cache->key_head);

		cache_encode_dirty_tail(cache);
		cache_encode_key_tail(cache);
	} else {
		if (cache_decode_key_tail(cache) || cache_decode_dirty_tail(cache)) {
			pcache_dev_err(pcache, "Corrupted key tail or dirty tail.\n");
			return -EIO;
		}
	}

	return 0;
}

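/*
 * Resolve the next segment id in the cache's segment chain. For a new
 * cache, allocate an empty segment from the cache device and link it
 * from the previous segment (or record it in cache_info as the chain
 * head). For an existing cache, follow the previous segment's
 * next_seg field (or start from cache_info.seg_id); a missing next
 * pointer indicates inconsistent metadata and yields -EFAULT.
 */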
static int get_seg_id(struct pcache_cache *cache,
		      struct pcache_cache_segment *prev_cache_seg,
		      bool new_cache, u32 *seg_id)
{
	struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
	struct pcache_cache_dev *cache_dev = cache->cache_dev;
	int ret;

	if (new_cache) {
		ret = cache_dev_get_empty_segment_id(cache_dev, seg_id);
		if (ret) {
			pcache_dev_err(pcache, "no available segment\n");
			goto err;
		}

		if (prev_cache_seg)
			cache_seg_set_next_seg(prev_cache_seg, *seg_id);
		else
			cache_info_set_seg_id(cache, *seg_id);
	} else {
		if (prev_cache_seg) {
			struct pcache_segment_info *prev_seg_info;

			prev_seg_info = &prev_cache_seg->cache_seg_info;
			if (!segment_info_has_next(prev_seg_info)) {
				ret = -EFAULT;
				goto err;
			}
			*seg_id = prev_cache_seg->cache_seg_info.next_seg;
		} else {
			*seg_id = cache->cache_info.seg_id;
		}
	}
	return 0;
err:
	return ret;
}

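/*
 * Walk the whole segment chain: resolve each segment id with
 * get_seg_id() and initialize the matching in-memory segment via
 * cache_seg_init(). For a new cache this builds the chain as it goes;
 * for an existing cache it follows the chain recorded on media.
 */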
static int cache_segs_init(struct pcache_cache *cache)
{
	struct pcache_cache_segment *prev_cache_seg = NULL;
	struct pcache_cache_info *cache_info = &cache->cache_info;
	bool new_cache = !(cache->cache_info.flags & PCACHE_CACHE_FLAGS_INIT_DONE);
	u32 seg_id;
	int ret;
	u32 i;

	for (i = 0; i < cache_info->n_segs; i++) {
		ret = get_seg_id(cache, prev_cache_seg, new_cache, &seg_id);
		if (ret)
			goto err;

		ret = cache_seg_init(cache, seg_id, i, new_cache);
		if (ret)
			goto err;

		prev_cache_seg = &cache->segments[i];
	}
	return 0;
err:
	return ret;
}

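/*
 * Build the runtime structures for the request path: the request key
 * tree (one subtree per PCACHE_CACHE_SUBTREE_SIZE of backing device
 * space), @n_paral ksets for batching key writes, and per-CPU data
 * heads. Finish by replaying keys persisted during previous runs so
 * cache state survives a restart.
 */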
static int cache_init_req_keys(struct pcache_cache *cache, u32 n_paral)
{
	struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
	u32 n_subtrees;
	int ret;
	u32 i, cpu;

	/* Calculate the number of cache subtrees based on the backing device size */
	n_subtrees = DIV_ROUND_UP(cache->dev_size << SECTOR_SHIFT, PCACHE_CACHE_SUBTREE_SIZE);
	ret = cache_tree_init(cache, &cache->req_key_tree, n_subtrees);
	if (ret)
		goto err;

	cache->n_ksets = n_paral;
	cache->ksets = kvcalloc(cache->n_ksets, PCACHE_KSET_SIZE, GFP_KERNEL);
	if (!cache->ksets) {
		ret = -ENOMEM;
		goto req_tree_exit;
	}

	/*
	 * Initialize each kset with a spinlock and delayed work for flushing.
	 * Each kset is associated with one queue to ensure independent handling
	 * of cache keys across multiple queues, maximizing multiqueue concurrency.
	 */
	for (i = 0; i < cache->n_ksets; i++) {
		struct pcache_cache_kset *kset = get_kset(cache, i);

		kset->cache = cache;
		spin_lock_init(&kset->kset_lock);
		INIT_DELAYED_WORK(&kset->flush_work, kset_flush_fn);
	}

	cache->data_heads = alloc_percpu(struct pcache_cache_data_head);
	if (!cache->data_heads) {
		ret = -ENOMEM;
		goto free_kset;
	}

	for_each_possible_cpu(cpu) {
		struct pcache_cache_data_head *h =
			per_cpu_ptr(cache->data_heads, cpu);
		h->head_pos.cache_seg = NULL;
	}

	/*
	 * Replay persisted cache keys using cache_replay.
	 * This function loads and replays cache keys from previously stored
	 * ksets, allowing the cache to restore its state after a restart.
	 */
	ret = cache_replay(cache);
	if (ret) {
		pcache_dev_err(pcache, "failed to replay keys\n");
		goto free_heads;
	}

	return 0;

free_heads:
	free_percpu(cache->data_heads);
free_kset:
	kvfree(cache->ksets);
req_tree_exit:
	cache_tree_exit(&cache->req_key_tree);
err:
	return ret;
}

static void cache_destroy_req_keys(struct pcache_cache *cache)
{
	u32 i;

	for (i = 0; i < cache->n_ksets; i++) {
		struct pcache_cache_kset *kset = get_kset(cache, i);

		cancel_delayed_work_sync(&kset->flush_work);
	}

	free_percpu(cache->data_heads);
	kvfree(cache->ksets);
	cache_tree_exit(&cache->req_key_tree);
}

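/*
 * Bring the cache online: allocate runtime state, locate the on-media
 * cache_info and cache_ctrl areas, initialize or validate cache_info,
 * set up the segment chain and tails, build the request key
 * structures (sized by the number of online CPUs), and start
 * writeback. Only once all of that succeeds is
 * PCACHE_CACHE_FLAGS_INIT_DONE set and persisted and the gc work
 * queued.
 */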
int pcache_cache_start(struct dm_pcache *pcache)
{
	struct pcache_backing_dev *backing_dev = &pcache->backing_dev;
	struct pcache_cache *cache = &pcache->cache;
	struct pcache_cache_options *opts = &pcache->opts;
	int ret;

	ret = cache_init(pcache);
	if (ret)
		return ret;

	cache->cache_info_addr = CACHE_DEV_CACHE_INFO(cache->cache_dev);
	cache->cache_ctrl = CACHE_DEV_CACHE_CTRL(cache->cache_dev);
	backing_dev->cache = cache;
	cache->dev_size = backing_dev->dev_size;

	ret = cache_info_init(cache, opts);
	if (ret)
		goto cache_exit;

	ret = cache_segs_init(cache);
	if (ret)
		goto cache_exit;

	ret = cache_tail_init(cache);
	if (ret)
		goto cache_exit;

	ret = cache_init_req_keys(cache, num_online_cpus());
	if (ret)
		goto cache_exit;

	ret = cache_writeback_init(cache);
	if (ret)
		goto destroy_keys;

	cache->cache_info.flags |= PCACHE_CACHE_FLAGS_INIT_DONE;
	cache_info_write(cache);
	queue_delayed_work(cache_get_wq(cache), &cache->gc_work, 0);

	return 0;

destroy_keys:
	cache_destroy_req_keys(cache);
cache_exit:
	cache_exit(cache);

	return ret;
}

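/*
 * Tear down in reverse order: flush outstanding cache keys, stop the
 * gc and clean work, shut down writeback, destroy the request key
 * structures if they were set up (a nonzero n_subtrees marks them as
 * initialized), and free the segment array and bitmap.
 */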
void pcache_cache_stop(struct dm_pcache *pcache)
{
	struct pcache_cache *cache = &pcache->cache;

	cache_flush(cache);

	cancel_delayed_work_sync(&cache->gc_work);
	flush_work(&cache->clean_work);
	cache_writeback_exit(cache);

	if (cache->req_key_tree.n_subtrees)
		cache_destroy_req_keys(cache);

	cache_exit(cache);
}

struct workqueue_struct *cache_get_wq(struct pcache_cache *cache)
{
	struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);

	return pcache->task_wq;
}

int pcache_cache_init(void)
{
	key_cache = KMEM_CACHE(pcache_cache_key, 0);
	if (!key_cache)
		return -ENOMEM;

	return 0;
}

void pcache_cache_exit(void)
{
	kmem_cache_destroy(key_cache);
}