xref: /linux/drivers/md/dm-pcache/cache.c (revision 55a42f78ffd386e01a5404419f8c5ded7db70a21)
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/blk_types.h>

#include "cache.h"
#include "cache_dev.h"
#include "backing_dev.h"
#include "dm_pcache.h"

struct kmem_cache *key_cache;

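/* Return the on-media cache_info slot currently selected by info_index. */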
static inline struct pcache_cache_info *get_cache_info_addr(struct pcache_cache *cache)
{
	return cache->cache_info_addr + cache->info_index;
}

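/*
 * Persist cache_info to the cache device: bump the sequence number,
 * refresh the header CRC, write the structure with memcpy_flushcache(),
 * then advance info_index so the next update lands in the next of the
 * PCACHE_META_INDEX_MAX metadata slots.
 */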
static void cache_info_write(struct pcache_cache *cache)
{
	struct pcache_cache_info *cache_info = &cache->cache_info;

	cache_info->header.seq++;
	cache_info->header.crc = pcache_meta_crc(&cache_info->header,
						sizeof(struct pcache_cache_info));

	memcpy_flushcache(get_cache_info_addr(cache), cache_info,
			sizeof(struct pcache_cache_info));

	cache->info_index = (cache->info_index + 1) % PCACHE_META_INDEX_MAX;
}

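/*
 * Load the latest valid on-media copy of cache_info, or set up a
 * default one when no valid copy exists (a new cache).  For an
 * existing cache, the data_crc option must match the flag recorded
 * when the cache was created; a mismatch fails with -EINVAL.
 */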
static void cache_info_init_default(struct pcache_cache *cache);
static int cache_info_init(struct pcache_cache *cache, struct pcache_cache_options *opts)
{
	struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
	struct pcache_cache_info *cache_info_addr;

	cache_info_addr = pcache_meta_find_latest(&cache->cache_info_addr->header,
						sizeof(struct pcache_cache_info),
						PCACHE_CACHE_INFO_SIZE,
						&cache->cache_info);
	if (IS_ERR(cache_info_addr))
		return PTR_ERR(cache_info_addr);

	if (cache_info_addr) {
		if (opts->data_crc !=
				(cache->cache_info.flags & PCACHE_CACHE_FLAGS_DATA_CRC)) {
			pcache_dev_err(pcache, "invalid option for data_crc: %s, expected: %s",
					opts->data_crc ? "true" : "false",
					cache->cache_info.flags & PCACHE_CACHE_FLAGS_DATA_CRC ? "true" : "false");
			return -EINVAL;
		}

		return 0;
	}

	/* init cache_info for new cache */
	cache_info_init_default(cache);
	cache_mode_set(cache, opts->cache_mode);
	if (opts->data_crc)
		cache->cache_info.flags |= PCACHE_CACHE_FLAGS_DATA_CRC;

	return 0;
}

static void cache_info_set_gc_percent(struct pcache_cache_info *cache_info, u8 percent)
{
	cache_info->flags &= ~PCACHE_CACHE_FLAGS_GC_PERCENT_MASK;
	cache_info->flags |= FIELD_PREP(PCACHE_CACHE_FLAGS_GC_PERCENT_MASK, percent);
}

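/*
 * Update the GC trigger percentage stored in cache_info flags and
 * persist the change.  Values outside
 * [PCACHE_CACHE_GC_PERCENT_MIN, PCACHE_CACHE_GC_PERCENT_MAX] are rejected.
 */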
int pcache_cache_set_gc_percent(struct pcache_cache *cache, u8 percent)
{
	if (percent > PCACHE_CACHE_GC_PERCENT_MAX || percent < PCACHE_CACHE_GC_PERCENT_MIN)
		return -EINVAL;

	mutex_lock(&cache->cache_info_lock);
	cache_info_set_gc_percent(&cache->cache_info, percent);

	cache_info_write(cache);
	mutex_unlock(&cache->cache_info_lock);

	return 0;
}

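/*
 * Persist a cache position (cache segment id plus offset within the
 * segment) into the on-media slot selected by *index.  The entry is
 * sequence-numbered and CRC-protected, written with memcpy_flushcache()
 * and ordered with pmem_wmb(); *index then advances to the next of the
 * PCACHE_META_INDEX_MAX slots.
 */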
void cache_pos_encode(struct pcache_cache *cache,
			     struct pcache_cache_pos_onmedia *pos_onmedia_base,
			     struct pcache_cache_pos *pos, u64 seq, u32 *index)
{
	struct pcache_cache_pos_onmedia pos_onmedia;
	struct pcache_cache_pos_onmedia *pos_onmedia_addr = pos_onmedia_base + *index;

	pos_onmedia.cache_seg_id = pos->cache_seg->cache_seg_id;
	pos_onmedia.seg_off = pos->seg_off;
	pos_onmedia.header.seq = seq;
	pos_onmedia.header.crc = cache_pos_onmedia_crc(&pos_onmedia);

	memcpy_flushcache(pos_onmedia_addr, &pos_onmedia, sizeof(struct pcache_cache_pos_onmedia));
	pmem_wmb();

	*index = (*index + 1) % PCACHE_META_INDEX_MAX;
}

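/*
 * Find the latest valid on-media copy of a cache position and decode it
 * into @pos.  The copy's sequence number and slot index are returned
 * through @seq and @index; -EIO is returned if no valid copy is found.
 */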
int cache_pos_decode(struct pcache_cache *cache,
			    struct pcache_cache_pos_onmedia *pos_onmedia,
			    struct pcache_cache_pos *pos, u64 *seq, u32 *index)
{
	struct pcache_cache_pos_onmedia latest, *latest_addr;

	latest_addr = pcache_meta_find_latest(&pos_onmedia->header,
					sizeof(struct pcache_cache_pos_onmedia),
					sizeof(struct pcache_cache_pos_onmedia),
					&latest);
	if (IS_ERR(latest_addr))
		return PTR_ERR(latest_addr);

	if (!latest_addr)
		return -EIO;

	pos->cache_seg = &cache->segments[latest.cache_seg_id];
	pos->seg_off = latest.seg_off;
	*seq = latest.header.seq;
	*index = (latest_addr - pos_onmedia);

	return 0;
}

static inline void cache_info_set_seg_id(struct pcache_cache *cache, u32 seg_id)
{
	cache->cache_info.seg_id = seg_id;
}

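/*
 * Allocate the in-memory segment array and segment bitmap, wire the
 * cache to its backing and cache devices, and initialize the locks and
 * the writeback, gc and clean work items.
 */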
static int cache_init(struct dm_pcache *pcache)
{
	struct pcache_cache *cache = &pcache->cache;
	struct pcache_backing_dev *backing_dev = &pcache->backing_dev;
	struct pcache_cache_dev *cache_dev = &pcache->cache_dev;
	int ret;

	cache->segments = kvcalloc(cache_dev->seg_num, sizeof(struct pcache_cache_segment), GFP_KERNEL);
	if (!cache->segments) {
		ret = -ENOMEM;
		goto err;
	}

	cache->seg_map = kvcalloc(BITS_TO_LONGS(cache_dev->seg_num), sizeof(unsigned long), GFP_KERNEL);
	if (!cache->seg_map) {
		ret = -ENOMEM;
		goto free_segments;
	}

	cache->backing_dev = backing_dev;
	cache->cache_dev = &pcache->cache_dev;
	cache->n_segs = cache_dev->seg_num;
	atomic_set(&cache->gc_errors, 0);
	spin_lock_init(&cache->seg_map_lock);
	spin_lock_init(&cache->key_head_lock);

	mutex_init(&cache->cache_info_lock);
	mutex_init(&cache->key_tail_lock);
	mutex_init(&cache->dirty_tail_lock);
	mutex_init(&cache->writeback_lock);

	INIT_DELAYED_WORK(&cache->writeback_work, cache_writeback_fn);
	INIT_DELAYED_WORK(&cache->gc_work, pcache_cache_gc_fn);
	INIT_WORK(&cache->clean_work, clean_fn);

	return 0;

free_segments:
	kvfree(cache->segments);
err:
	return ret;
}

static void cache_exit(struct pcache_cache *cache)
{
	kvfree(cache->seg_map);
	kvfree(cache->segments);
}

static void cache_info_init_default(struct pcache_cache *cache)
{
	struct pcache_cache_info *cache_info = &cache->cache_info;

	cache_info->header.seq = 0;
	cache_info->n_segs = cache->cache_dev->seg_num;
	cache_info_set_gc_percent(cache_info, PCACHE_CACHE_GC_PERCENT_DEFAULT);
}

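/*
 * For a new cache, claim segment 0 in the segment map, point key_head,
 * key_tail and dirty_tail at its start and persist both tails.  For an
 * existing cache, decode the tails from their on-media copies and fail
 * with -EIO if either is corrupted.
 */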
static int cache_tail_init(struct pcache_cache *cache)
{
	struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
	bool new_cache = !(cache->cache_info.flags & PCACHE_CACHE_FLAGS_INIT_DONE);

	if (new_cache) {
		__set_bit(0, cache->seg_map);

		cache->key_head.cache_seg = &cache->segments[0];
		cache->key_head.seg_off = 0;
		cache_pos_copy(&cache->key_tail, &cache->key_head);
		cache_pos_copy(&cache->dirty_tail, &cache->key_head);

		cache_encode_dirty_tail(cache);
		cache_encode_key_tail(cache);
	} else {
		if (cache_decode_key_tail(cache) || cache_decode_dirty_tail(cache)) {
			pcache_dev_err(pcache, "Corrupted key tail or dirty tail.\n");
			return -EIO;
		}
	}

	return 0;
}

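/*
 * Determine the segment id for the next cache segment.  For a new
 * cache, an empty segment is allocated from the cache device and linked
 * from the previous segment (or recorded in cache_info for the first
 * one).  For an existing cache, the id is read back from the previous
 * segment's next_seg field, or from cache_info for the first segment.
 */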
static int get_seg_id(struct pcache_cache *cache,
		      struct pcache_cache_segment *prev_cache_seg,
		      bool new_cache, u32 *seg_id)
{
	struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
	struct pcache_cache_dev *cache_dev = cache->cache_dev;
	int ret;

	if (new_cache) {
		ret = cache_dev_get_empty_segment_id(cache_dev, seg_id);
		if (ret) {
			pcache_dev_err(pcache, "no available segment\n");
			goto err;
		}

		if (prev_cache_seg)
			cache_seg_set_next_seg(prev_cache_seg, *seg_id);
		else
			cache_info_set_seg_id(cache, *seg_id);
	} else {
		if (prev_cache_seg) {
			struct pcache_segment_info *prev_seg_info;

			prev_seg_info = &prev_cache_seg->cache_seg_info;
			if (!segment_info_has_next(prev_seg_info)) {
				ret = -EFAULT;
				goto err;
			}
			*seg_id = prev_cache_seg->cache_seg_info.next_seg;
		} else {
			*seg_id = cache->cache_info.seg_id;
		}
	}
	return 0;
err:
	return ret;
}

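/*
 * Walk the chain of cache segments described by cache_info and
 * initialize each one, allocating segments from the cache device when
 * the cache is being created for the first time.
 */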
static int cache_segs_init(struct pcache_cache *cache)
{
	struct pcache_cache_segment *prev_cache_seg = NULL;
	struct pcache_cache_info *cache_info = &cache->cache_info;
	bool new_cache = !(cache->cache_info.flags & PCACHE_CACHE_FLAGS_INIT_DONE);
	u32 seg_id;
	int ret;
	u32 i;

	for (i = 0; i < cache_info->n_segs; i++) {
		ret = get_seg_id(cache, prev_cache_seg, new_cache, &seg_id);
		if (ret)
			goto err;

		ret = cache_seg_init(cache, seg_id, i, new_cache);
		if (ret)
			goto err;

		prev_cache_seg = &cache->segments[i];
	}
	return 0;
err:
	return ret;
}

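/*
 * Set up the request key tree, the ksets (one per queue, @n_paral in
 * total) and the per-CPU data heads, then replay persisted keys so the
 * cache state is restored after a restart.
 */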
static int cache_init_req_keys(struct pcache_cache *cache, u32 n_paral)
{
	struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
	u32 n_subtrees;
	int ret;
	u32 i, cpu;

	/* Calculate the number of cache subtrees based on the device size */
	n_subtrees = DIV_ROUND_UP(cache->dev_size << SECTOR_SHIFT, PCACHE_CACHE_SUBTREE_SIZE);
	ret = cache_tree_init(cache, &cache->req_key_tree, n_subtrees);
	if (ret)
		goto err;

	cache->n_ksets = n_paral;
	cache->ksets = kvcalloc(cache->n_ksets, PCACHE_KSET_SIZE, GFP_KERNEL);
	if (!cache->ksets) {
		ret = -ENOMEM;
		goto req_tree_exit;
	}

	/*
	 * Initialize each kset with a spinlock and delayed work for flushing.
	 * Each kset is associated with one queue to ensure independent handling
	 * of cache keys across multiple queues, maximizing multiqueue concurrency.
	 */
	for (i = 0; i < cache->n_ksets; i++) {
		struct pcache_cache_kset *kset = get_kset(cache, i);

		kset->cache = cache;
		spin_lock_init(&kset->kset_lock);
		INIT_DELAYED_WORK(&kset->flush_work, kset_flush_fn);
	}

	cache->data_heads = alloc_percpu(struct pcache_cache_data_head);
	if (!cache->data_heads) {
		ret = -ENOMEM;
		goto free_kset;
	}

	for_each_possible_cpu(cpu) {
		struct pcache_cache_data_head *h =
			per_cpu_ptr(cache->data_heads, cpu);
		h->head_pos.cache_seg = NULL;
	}

	/*
	 * Replay persisted cache keys using cache_replay.
	 * This function loads and replays cache keys from previously stored
	 * ksets, allowing the cache to restore its state after a restart.
	 */
	ret = cache_replay(cache);
	if (ret) {
		pcache_dev_err(pcache, "failed to replay keys\n");
		goto free_heads;
	}

	return 0;

free_heads:
	free_percpu(cache->data_heads);
free_kset:
	kvfree(cache->ksets);
req_tree_exit:
	cache_tree_exit(&cache->req_key_tree);
err:
	return ret;
}

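/*
 * Tear down what cache_init_req_keys() set up: cancel pending kset
 * flush work, free the per-CPU data heads and the ksets, and release
 * the request key tree.
 */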
static void cache_destroy_req_keys(struct pcache_cache *cache)
{
	u32 i;

	for (i = 0; i < cache->n_ksets; i++) {
		struct pcache_cache_kset *kset = get_kset(cache, i);

		cancel_delayed_work_sync(&kset->flush_work);
	}

	free_percpu(cache->data_heads);
	kvfree(cache->ksets);
	cache_tree_exit(&cache->req_key_tree);
}

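/*
 * Bring the cache online: initialize the in-memory structures, load or
 * create the on-media cache metadata, set up the segments, tails,
 * request keys and writeback, then mark the cache initialized and kick
 * off garbage collection.
 */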
int pcache_cache_start(struct dm_pcache *pcache)
{
	struct pcache_backing_dev *backing_dev = &pcache->backing_dev;
	struct pcache_cache *cache = &pcache->cache;
	struct pcache_cache_options *opts = &pcache->opts;
	int ret;

	ret = cache_init(pcache);
	if (ret)
		return ret;

	cache->cache_info_addr = CACHE_DEV_CACHE_INFO(cache->cache_dev);
	cache->cache_ctrl = CACHE_DEV_CACHE_CTRL(cache->cache_dev);
	backing_dev->cache = cache;
	cache->dev_size = backing_dev->dev_size;

	ret = cache_info_init(cache, opts);
	if (ret)
		goto cache_exit;

	ret = cache_segs_init(cache);
	if (ret)
		goto cache_exit;

	ret = cache_tail_init(cache);
	if (ret)
		goto cache_exit;

	ret = cache_init_req_keys(cache, num_online_cpus());
	if (ret)
		goto cache_exit;

	ret = cache_writeback_init(cache);
	if (ret)
		goto destroy_keys;

	cache->cache_info.flags |= PCACHE_CACHE_FLAGS_INIT_DONE;
	cache_info_write(cache);
	queue_delayed_work(cache_get_wq(cache), &cache->gc_work, 0);

	return 0;

destroy_keys:
	cache_destroy_req_keys(cache);
cache_exit:
	cache_exit(cache);

	return ret;
}

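/*
 * Quiesce and tear down the cache: flush pending cache keys, stop the
 * gc, clean and writeback work, destroy the request key structures if
 * they were set up, and free the in-memory state.
 */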
void pcache_cache_stop(struct dm_pcache *pcache)
{
	struct pcache_cache *cache = &pcache->cache;

	cache_flush(cache);

	cancel_delayed_work_sync(&cache->gc_work);
	flush_work(&cache->clean_work);
	cache_writeback_exit(cache);

	if (cache->req_key_tree.n_subtrees)
		cache_destroy_req_keys(cache);

	cache_exit(cache);
}

struct workqueue_struct *cache_get_wq(struct pcache_cache *cache)
{
	struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);

	return pcache->task_wq;
}

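/*
 * Module-level init/exit: create and destroy the kmem_cache used for
 * struct pcache_cache_key allocations.
 */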
int pcache_cache_init(void)
{
	key_cache = KMEM_CACHE(pcache_cache_key, 0);
	if (!key_cache)
		return -ENOMEM;

	return 0;
}

void pcache_cache_exit(void)
{
	kmem_cache_destroy(key_cache);
}