// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/blk_types.h>

#include "cache.h"
#include "cache_dev.h"
#include "backing_dev.h"
#include "dm_pcache.h"

struct kmem_cache *key_cache;

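/*
 * Return the address of the on-media cache_info slot currently selected
 * by cache->info_index.
 */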
static inline struct pcache_cache_info *get_cache_info_addr(struct pcache_cache *cache)
{
	return (struct pcache_cache_info *)((char *)cache->cache_info_addr +
						(size_t)cache->info_index * PCACHE_CACHE_INFO_SIZE);
}

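/*
 * Persist cache->cache_info: bump the sequence number, CRC-protect the
 * structure, then write it to the next of the PCACHE_META_INDEX_MAX
 * rotating slots so the previous valid copy is never overwritten in
 * place. pmem_wmb() orders the flushed write.
 */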
static void cache_info_write(struct pcache_cache *cache)
{
	struct pcache_cache_info *cache_info = &cache->cache_info;

	cache_info->header.seq++;
	cache_info->header.crc = pcache_meta_crc(&cache_info->header,
						sizeof(struct pcache_cache_info));

	cache->info_index = (cache->info_index + 1) % PCACHE_META_INDEX_MAX;
	memcpy_flushcache(get_cache_info_addr(cache), cache_info,
			sizeof(struct pcache_cache_info));
	pmem_wmb();
}

static void cache_info_init_default(struct pcache_cache *cache);
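/*
 * Load the most recent valid cache_info replica from the metadata area.
 * If one exists, check that the data_crc option matches the persisted
 * flags; otherwise initialize a default cache_info for a new cache.
 */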
static int cache_info_init(struct pcache_cache *cache, struct pcache_cache_options *opts)
{
	struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
	struct pcache_cache_info *cache_info_addr;

	cache_info_addr = pcache_meta_find_latest(&cache->cache_info_addr->header,
						sizeof(struct pcache_cache_info),
						PCACHE_CACHE_INFO_SIZE,
						&cache->cache_info);
	if (IS_ERR(cache_info_addr))
		return PTR_ERR(cache_info_addr);

	if (cache_info_addr) {
		if (opts->data_crc !=
				(cache->cache_info.flags & PCACHE_CACHE_FLAGS_DATA_CRC)) {
			pcache_dev_err(pcache, "invalid option for data_crc: %s, expected: %s",
					opts->data_crc ? "true" : "false",
					cache->cache_info.flags & PCACHE_CACHE_FLAGS_DATA_CRC ? "true" : "false");
			return -EINVAL;
		}

		cache->info_index = ((char *)cache_info_addr - (char *)cache->cache_info_addr) / PCACHE_CACHE_INFO_SIZE;

		return 0;
	}

	/* init cache_info for new cache */
	cache_info_init_default(cache);
	cache_mode_set(cache, opts->cache_mode);
	if (opts->data_crc)
		cache->cache_info.flags |= PCACHE_CACHE_FLAGS_DATA_CRC;

	return 0;
}

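/* The GC trigger percentage is stored in a bitfield of cache_info->flags. */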
static void cache_info_set_gc_percent(struct pcache_cache_info *cache_info, u8 percent)
{
	cache_info->flags &= ~PCACHE_CACHE_FLAGS_GC_PERCENT_MASK;
	cache_info->flags |= FIELD_PREP(PCACHE_CACHE_FLAGS_GC_PERCENT_MASK, percent);
}

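/*
 * Update the GC trigger percentage and persist it, serialized by
 * cache_info_lock. Values outside
 * [PCACHE_CACHE_GC_PERCENT_MIN, PCACHE_CACHE_GC_PERCENT_MAX] are rejected.
 */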
int pcache_cache_set_gc_percent(struct pcache_cache *cache, u8 percent)
{
	if (percent > PCACHE_CACHE_GC_PERCENT_MAX || percent < PCACHE_CACHE_GC_PERCENT_MIN)
		return -EINVAL;

	mutex_lock(&cache->cache_info_lock);
	cache_info_set_gc_percent(&cache->cache_info, percent);

	cache_info_write(cache);
	mutex_unlock(&cache->cache_info_lock);

	return 0;
}

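/*
 * Persist a cache position (segment id + offset) to the on-media replica
 * at *index, then advance *index modulo PCACHE_META_INDEX_MAX. The entry
 * is assembled and CRC-protected on the stack first, so a single
 * memcpy_flushcache() publishes it and a reader never sees a partially
 * updated replica.
 */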
void cache_pos_encode(struct pcache_cache *cache,
			     struct pcache_cache_pos_onmedia *pos_onmedia_base,
			     struct pcache_cache_pos *pos, u64 seq, u32 *index)
{
	struct pcache_cache_pos_onmedia pos_onmedia;
	struct pcache_cache_pos_onmedia *pos_onmedia_addr = pos_onmedia_base + *index;

	pos_onmedia.cache_seg_id = pos->cache_seg->cache_seg_id;
	pos_onmedia.seg_off = pos->seg_off;
	pos_onmedia.header.seq = seq;
	pos_onmedia.header.crc = cache_pos_onmedia_crc(&pos_onmedia);

	*index = (*index + 1) % PCACHE_META_INDEX_MAX;

	memcpy_flushcache(pos_onmedia_addr, &pos_onmedia, sizeof(struct pcache_cache_pos_onmedia));
	pmem_wmb();
}

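/*
 * Read back the latest valid on-media replica of a cache position,
 * returning the decoded position, its sequence number and the replica
 * index it was found at. Returns -EIO if no valid replica exists.
 */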
int cache_pos_decode(struct pcache_cache *cache,
			    struct pcache_cache_pos_onmedia *pos_onmedia,
			    struct pcache_cache_pos *pos, u64 *seq, u32 *index)
{
	struct pcache_cache_pos_onmedia latest, *latest_addr;

	latest_addr = pcache_meta_find_latest(&pos_onmedia->header,
					sizeof(struct pcache_cache_pos_onmedia),
					sizeof(struct pcache_cache_pos_onmedia),
					&latest);
	if (IS_ERR(latest_addr))
		return PTR_ERR(latest_addr);

	if (!latest_addr)
		return -EIO;

	pos->cache_seg = &cache->segments[latest.cache_seg_id];
	pos->seg_off = latest.seg_off;
	*seq = latest.header.seq;
	*index = (latest_addr - pos_onmedia);

	return 0;
}

static inline void cache_info_set_seg_id(struct pcache_cache *cache, u32 seg_id)
{
	cache->cache_info.seg_id = seg_id;
}

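/*
 * Allocate the in-memory segment array and segment bitmap, and initialize
 * the locks and work items used by writeback, GC and cleaning.
 */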
static int cache_init(struct dm_pcache *pcache)
{
	struct pcache_cache *cache = &pcache->cache;
	struct pcache_backing_dev *backing_dev = &pcache->backing_dev;
	struct pcache_cache_dev *cache_dev = &pcache->cache_dev;
	int ret;

	cache->segments = kvzalloc_objs(struct pcache_cache_segment,
					cache_dev->seg_num, GFP_KERNEL);
	if (!cache->segments) {
		ret = -ENOMEM;
		goto err;
	}

	cache->seg_map = kvcalloc(BITS_TO_LONGS(cache_dev->seg_num), sizeof(unsigned long), GFP_KERNEL);
	if (!cache->seg_map) {
		ret = -ENOMEM;
		goto free_segments;
	}

	cache->backing_dev = backing_dev;
	cache->cache_dev = &pcache->cache_dev;
	cache->n_segs = cache_dev->seg_num;
	atomic_set(&cache->gc_errors, 0);
	spin_lock_init(&cache->seg_map_lock);
	spin_lock_init(&cache->key_head_lock);

	mutex_init(&cache->cache_info_lock);
	mutex_init(&cache->key_tail_lock);
	mutex_init(&cache->dirty_tail_lock);
	mutex_init(&cache->writeback_lock);

	INIT_DELAYED_WORK(&cache->writeback_work, cache_writeback_fn);
	INIT_DELAYED_WORK(&cache->gc_work, pcache_cache_gc_fn);
	INIT_WORK(&cache->clean_work, clean_fn);

	return 0;

free_segments:
	kvfree(cache->segments);
err:
	return ret;
}

static void cache_exit(struct pcache_cache *cache)
{
	kvfree(cache->seg_map);
	kvfree(cache->segments);
}

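/* Zero the cache_info and apply the defaults for a freshly created cache. */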
static void cache_info_init_default(struct pcache_cache *cache)
{
	struct pcache_cache_info *cache_info = &cache->cache_info;

	memset(cache_info, 0, sizeof(*cache_info));
	cache_info->n_segs = cache->cache_dev->seg_num;
	cache_info_set_gc_percent(cache_info, PCACHE_CACHE_GC_PERCENT_DEFAULT);
}

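/*
 * For a brand-new cache, reserve segment 0 and point key_head, key_tail
 * and dirty_tail at its start, persisting both tails. For an existing
 * cache, decode the tails from on-media metadata instead.
 */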
static int cache_tail_init(struct pcache_cache *cache)
{
	struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
	bool new_cache = !(cache->cache_info.flags & PCACHE_CACHE_FLAGS_INIT_DONE);

	if (new_cache) {
		__set_bit(0, cache->seg_map);

		cache->key_head.cache_seg = &cache->segments[0];
		cache->key_head.seg_off = 0;
		cache_pos_copy(&cache->key_tail, &cache->key_head);
		cache_pos_copy(&cache->dirty_tail, &cache->key_head);

		cache_encode_dirty_tail(cache);
		cache_encode_key_tail(cache);
	} else {
		if (cache_decode_key_tail(cache) || cache_decode_dirty_tail(cache)) {
			pcache_dev_err(pcache, "Corrupted key tail or dirty tail.\n");
			return -EIO;
		}
	}

	return 0;
}

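/*
 * Resolve the id of the next cache segment. For a new cache an empty
 * segment is allocated and linked after prev_cache_seg (or recorded in
 * cache_info when it is the first segment). For an existing cache the id
 * is read from the persisted segment chain.
 */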
static int get_seg_id(struct pcache_cache *cache,
		      struct pcache_cache_segment *prev_cache_seg,
		      bool new_cache, u32 *seg_id)
{
	struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
	struct pcache_cache_dev *cache_dev = cache->cache_dev;
	int ret;

	if (new_cache) {
		ret = cache_dev_get_empty_segment_id(cache_dev, seg_id);
		if (ret) {
			pcache_dev_err(pcache, "no available segment\n");
			goto err;
		}

		if (prev_cache_seg)
			cache_seg_set_next_seg(prev_cache_seg, *seg_id);
		else
			cache_info_set_seg_id(cache, *seg_id);
	} else {
		if (prev_cache_seg) {
			struct pcache_segment_info *prev_seg_info;

			prev_seg_info = &prev_cache_seg->cache_seg_info;
			if (!segment_info_has_next(prev_seg_info)) {
				ret = -EFAULT;
				goto err;
			}
			*seg_id = prev_cache_seg->cache_seg_info.next_seg;
		} else {
			*seg_id = cache->cache_info.seg_id;
		}
	}
	return 0;
err:
	return ret;
}

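/*
 * Walk (or, for a new cache, build) the segment chain, initializing each
 * of the cache_info->n_segs cache segments in order.
 */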
static int cache_segs_init(struct pcache_cache *cache)
{
	struct pcache_cache_segment *prev_cache_seg = NULL;
	struct pcache_cache_info *cache_info = &cache->cache_info;
	bool new_cache = !(cache->cache_info.flags & PCACHE_CACHE_FLAGS_INIT_DONE);
	u32 seg_id;
	int ret;
	u32 i;

	for (i = 0; i < cache_info->n_segs; i++) {
		ret = get_seg_id(cache, prev_cache_seg, new_cache, &seg_id);
		if (ret)
			goto err;

		ret = cache_seg_init(cache, seg_id, i, new_cache);
		if (ret)
			goto err;

		prev_cache_seg = &cache->segments[i];
	}
	return 0;
err:
	return ret;
}

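/*
 * Set up the per-request key infrastructure: a request key tree sized by
 * the backing device size, n_paral ksets for concurrent key insertion,
 * per-CPU data heads, and finally a replay of previously persisted keys.
 */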
static int cache_init_req_keys(struct pcache_cache *cache, u32 n_paral)
{
	struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
	u32 n_subtrees;
	int ret;
	u32 i, cpu;

	/* Calculate number of cache trees based on the device size */
	n_subtrees = DIV_ROUND_UP(cache->dev_size << SECTOR_SHIFT, PCACHE_CACHE_SUBTREE_SIZE);
	ret = cache_tree_init(cache, &cache->req_key_tree, n_subtrees);
	if (ret)
		goto err;

	cache->n_ksets = n_paral;
	cache->ksets = kvcalloc(cache->n_ksets, PCACHE_KSET_SIZE, GFP_KERNEL);
	if (!cache->ksets) {
		ret = -ENOMEM;
		goto req_tree_exit;
	}

	/*
	 * Initialize each kset with a spinlock and delayed work for flushing.
	 * Each kset is associated with one queue to ensure independent handling
	 * of cache keys across multiple queues, maximizing multiqueue concurrency.
	 */
	for (i = 0; i < cache->n_ksets; i++) {
		struct pcache_cache_kset *kset = get_kset(cache, i);

		kset->cache = cache;
		spin_lock_init(&kset->kset_lock);
		INIT_DELAYED_WORK(&kset->flush_work, kset_flush_fn);
	}

	cache->data_heads = alloc_percpu(struct pcache_cache_data_head);
	if (!cache->data_heads) {
		ret = -ENOMEM;
		goto free_kset;
	}

	for_each_possible_cpu(cpu) {
		struct pcache_cache_data_head *h =
			per_cpu_ptr(cache->data_heads, cpu);
		h->head_pos.cache_seg = NULL;
	}

	/*
	 * Replay persisted cache keys using cache_replay.
	 * This function loads and replays cache keys from previously stored
	 * ksets, allowing the cache to restore its state after a restart.
	 */
	ret = cache_replay(cache);
	if (ret) {
		pcache_dev_err(pcache, "failed to replay keys\n");
		goto free_heads;
	}

	return 0;

free_heads:
	free_percpu(cache->data_heads);
free_kset:
	kvfree(cache->ksets);
req_tree_exit:
	cache_tree_exit(&cache->req_key_tree);
err:
	return ret;
}

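/* Tear down what cache_init_req_keys() set up, in reverse order. */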
static void cache_destroy_req_keys(struct pcache_cache *cache)
{
	u32 i;

	for (i = 0; i < cache->n_ksets; i++) {
		struct pcache_cache_kset *kset = get_kset(cache, i);

		cancel_delayed_work_sync(&kset->flush_work);
	}

	free_percpu(cache->data_heads);
	kvfree(cache->ksets);
	cache_tree_exit(&cache->req_key_tree);
}

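/*
 * Bring the cache online: basic allocation, metadata and segment
 * initialization, tail recovery, key replay and writeback setup. Only
 * once everything has succeeded is PCACHE_CACHE_FLAGS_INIT_DONE
 * persisted and the GC work kicked off.
 */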
int pcache_cache_start(struct dm_pcache *pcache)
{
	struct pcache_backing_dev *backing_dev = &pcache->backing_dev;
	struct pcache_cache *cache = &pcache->cache;
	struct pcache_cache_options *opts = &pcache->opts;
	int ret;

	ret = cache_init(pcache);
	if (ret)
		return ret;

	cache->cache_info_addr = CACHE_DEV_CACHE_INFO(cache->cache_dev);
	cache->cache_ctrl = CACHE_DEV_CACHE_CTRL(cache->cache_dev);
	backing_dev->cache = cache;
	cache->dev_size = backing_dev->dev_size;

	ret = cache_info_init(cache, opts);
	if (ret)
		goto cache_exit;

	ret = cache_segs_init(cache);
	if (ret)
		goto cache_exit;

	ret = cache_tail_init(cache);
	if (ret)
		goto cache_exit;

	ret = cache_init_req_keys(cache, num_online_cpus());
	if (ret)
		goto cache_exit;

	ret = cache_writeback_init(cache);
	if (ret)
		goto destroy_keys;

	cache->cache_info.flags |= PCACHE_CACHE_FLAGS_INIT_DONE;
	cache_info_write(cache);
	queue_delayed_work(cache_get_wq(cache), &cache->gc_work, 0);

	return 0;

destroy_keys:
	cache_destroy_req_keys(cache);
cache_exit:
	cache_exit(cache);

	return ret;
}

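/*
 * Quiesce the cache: flush outstanding keys, stop the GC, cleaning and
 * writeback work, destroy the request key infrastructure and free the
 * in-memory structures.
 */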
void pcache_cache_stop(struct dm_pcache *pcache)
{
	struct pcache_cache *cache = &pcache->cache;

	pcache_cache_flush(cache);

	cancel_delayed_work_sync(&cache->gc_work);
	flush_work(&cache->clean_work);
	cache_writeback_exit(cache);

	if (cache->req_key_tree.n_subtrees)
		cache_destroy_req_keys(cache);

	cache_exit(cache);
}

struct workqueue_struct *cache_get_wq(struct pcache_cache *cache)
{
	struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);

	return pcache->task_wq;
}

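/* Create the slab cache used to allocate struct pcache_cache_key objects. */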
int pcache_cache_init(void)
{
	key_cache = KMEM_CACHE(pcache_cache_key, 0);
	if (!key_cache)
		return -ENOMEM;

	return 0;
}

void pcache_cache_exit(void)
{
	kmem_cache_destroy(key_cache);
}