// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/blk_types.h>

#include "cache.h"
#include "cache_dev.h"
#include "backing_dev.h"
#include "dm_pcache.h"

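/* Slab cache backing struct pcache_cache_key allocations (set up in pcache_cache_init()). */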
struct kmem_cache *key_cache;

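/*
 * Return the address of the on-media cache_info copy selected by
 * cache->info_index. PCACHE_META_INDEX_MAX copies are laid out back to
 * back, PCACHE_CACHE_INFO_SIZE apart.
 */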
static inline struct pcache_cache_info *get_cache_info_addr(struct pcache_cache *cache)
{
	return (struct pcache_cache_info *)((char *)cache->cache_info_addr +
					    (size_t)cache->info_index * PCACHE_CACHE_INFO_SIZE);
}

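/*
 * Persist the in-core cache_info: bump the sequence number, recompute the
 * header CRC, advance to the next metadata slot and flush the copy to the
 * cache device, followed by a pmem write barrier.
 */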
static void cache_info_write(struct pcache_cache *cache)
{
	struct pcache_cache_info *cache_info = &cache->cache_info;

	cache_info->header.seq++;
	cache_info->header.crc = pcache_meta_crc(&cache_info->header,
						 sizeof(struct pcache_cache_info));

	cache->info_index = (cache->info_index + 1) % PCACHE_META_INDEX_MAX;
	memcpy_flushcache(get_cache_info_addr(cache), cache_info,
			  sizeof(struct pcache_cache_info));
	pmem_wmb();
}

static void cache_info_init_default(struct pcache_cache *cache);
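/*
 * Load the most recent valid cache_info copy from the cache device. If one
 * exists, reject a data_crc option that conflicts with the stored flags;
 * otherwise initialize a default cache_info for a freshly created cache.
 */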
static int cache_info_init(struct pcache_cache *cache, struct pcache_cache_options *opts)
{
	struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
	struct pcache_cache_info *cache_info_addr;

	cache_info_addr = pcache_meta_find_latest(&cache->cache_info_addr->header,
						  sizeof(struct pcache_cache_info),
						  PCACHE_CACHE_INFO_SIZE,
						  &cache->cache_info);
	if (IS_ERR(cache_info_addr))
		return PTR_ERR(cache_info_addr);

	if (cache_info_addr) {
		if (opts->data_crc !=
		    (cache->cache_info.flags & PCACHE_CACHE_FLAGS_DATA_CRC)) {
			pcache_dev_err(pcache, "invalid option for data_crc: %s, expected: %s",
				       opts->data_crc ? "true" : "false",
				       cache->cache_info.flags & PCACHE_CACHE_FLAGS_DATA_CRC ? "true" : "false");
			return -EINVAL;
		}

		cache->info_index = ((char *)cache_info_addr - (char *)cache->cache_info_addr) / PCACHE_CACHE_INFO_SIZE;

		return 0;
	}

	/* init cache_info for new cache */
	cache_info_init_default(cache);
	cache_mode_set(cache, opts->cache_mode);
	if (opts->data_crc)
		cache->cache_info.flags |= PCACHE_CACHE_FLAGS_DATA_CRC;

	return 0;
}

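/* Encode @percent into the GC percent field of cache_info->flags. */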
static void cache_info_set_gc_percent(struct pcache_cache_info *cache_info, u8 percent)
{
	cache_info->flags &= ~PCACHE_CACHE_FLAGS_GC_PERCENT_MASK;
	cache_info->flags |= FIELD_PREP(PCACHE_CACHE_FLAGS_GC_PERCENT_MASK, percent);
}

int pcache_cache_set_gc_percent(struct pcache_cache *cache, u8 percent)
{
	if (percent > PCACHE_CACHE_GC_PERCENT_MAX || percent < PCACHE_CACHE_GC_PERCENT_MIN)
		return -EINVAL;

	mutex_lock(&cache->cache_info_lock);
	cache_info_set_gc_percent(&cache->cache_info, percent);

	cache_info_write(cache);
	mutex_unlock(&cache->cache_info_lock);

	return 0;
}

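/*
 * Write a cache position (segment id + offset within the segment) to its
 * on-media slot: fill a local copy, stamp it with @seq and a CRC, advance
 * *index to the next slot and flush the record to the cache device.
 */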
void cache_pos_encode(struct pcache_cache *cache,
		      struct pcache_cache_pos_onmedia *pos_onmedia_base,
		      struct pcache_cache_pos *pos, u64 seq, u32 *index)
{
	struct pcache_cache_pos_onmedia pos_onmedia;
	struct pcache_cache_pos_onmedia *pos_onmedia_addr = pos_onmedia_base + *index;

	pos_onmedia.cache_seg_id = pos->cache_seg->cache_seg_id;
	pos_onmedia.seg_off = pos->seg_off;
	pos_onmedia.header.seq = seq;
	pos_onmedia.header.crc = cache_pos_onmedia_crc(&pos_onmedia);

	*index = (*index + 1) % PCACHE_META_INDEX_MAX;

	memcpy_flushcache(pos_onmedia_addr, &pos_onmedia, sizeof(struct pcache_cache_pos_onmedia));
	pmem_wmb();
}

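/*
 * Read back a cache position written by cache_pos_encode(): pick the latest
 * valid on-media copy, translate it into an in-core pcache_cache_pos and
 * report its sequence number and slot index. Returns -EIO if no valid copy
 * is found.
 */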
int cache_pos_decode(struct pcache_cache *cache,
		     struct pcache_cache_pos_onmedia *pos_onmedia,
		     struct pcache_cache_pos *pos, u64 *seq, u32 *index)
{
	struct pcache_cache_pos_onmedia latest, *latest_addr;

	latest_addr = pcache_meta_find_latest(&pos_onmedia->header,
					      sizeof(struct pcache_cache_pos_onmedia),
					      sizeof(struct pcache_cache_pos_onmedia),
					      &latest);
	if (IS_ERR(latest_addr))
		return PTR_ERR(latest_addr);

	if (!latest_addr)
		return -EIO;

	pos->cache_seg = &cache->segments[latest.cache_seg_id];
	pos->seg_off = latest.seg_off;
	*seq = latest.header.seq;
	*index = (latest_addr - pos_onmedia);

	return 0;
}

static inline void cache_info_set_seg_id(struct pcache_cache *cache, u32 seg_id)
{
	cache->cache_info.seg_id = seg_id;
}

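/*
 * Allocate the in-core segment array and segment bitmap and initialize the
 * locks and work items of the cache. No on-media state is touched here.
 */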
static int cache_init(struct dm_pcache *pcache)
{
	struct pcache_cache *cache = &pcache->cache;
	struct pcache_backing_dev *backing_dev = &pcache->backing_dev;
	struct pcache_cache_dev *cache_dev = &pcache->cache_dev;
	int ret;

	cache->segments = kvcalloc(cache_dev->seg_num, sizeof(struct pcache_cache_segment), GFP_KERNEL);
	if (!cache->segments) {
		ret = -ENOMEM;
		goto err;
	}

	cache->seg_map = kvcalloc(BITS_TO_LONGS(cache_dev->seg_num), sizeof(unsigned long), GFP_KERNEL);
	if (!cache->seg_map) {
		ret = -ENOMEM;
		goto free_segments;
	}

	cache->backing_dev = backing_dev;
	cache->cache_dev = &pcache->cache_dev;
	cache->n_segs = cache_dev->seg_num;
	atomic_set(&cache->gc_errors, 0);
	spin_lock_init(&cache->seg_map_lock);
	spin_lock_init(&cache->key_head_lock);

	mutex_init(&cache->cache_info_lock);
	mutex_init(&cache->key_tail_lock);
	mutex_init(&cache->dirty_tail_lock);
	mutex_init(&cache->writeback_lock);

	INIT_DELAYED_WORK(&cache->writeback_work, cache_writeback_fn);
	INIT_DELAYED_WORK(&cache->gc_work, pcache_cache_gc_fn);
	INIT_WORK(&cache->clean_work, clean_fn);

	return 0;

free_segments:
	kvfree(cache->segments);
err:
	return ret;
}

static void cache_exit(struct pcache_cache *cache)
{
	kvfree(cache->seg_map);
	kvfree(cache->segments);
}

static void cache_info_init_default(struct pcache_cache *cache)
{
	struct pcache_cache_info *cache_info = &cache->cache_info;

	memset(cache_info, 0, sizeof(*cache_info));
	cache_info->n_segs = cache->cache_dev->seg_num;
	cache_info_set_gc_percent(cache_info, PCACHE_CACHE_GC_PERCENT_DEFAULT);
}

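/*
 * Set up key_head, key_tail and dirty_tail. A brand new cache starts all
 * three at segment 0, offset 0 and persists the tails; an existing cache
 * decodes the tails from on-media metadata and fails with -EIO if they are
 * corrupted.
 */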
static int cache_tail_init(struct pcache_cache *cache)
{
	struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
	bool new_cache = !(cache->cache_info.flags & PCACHE_CACHE_FLAGS_INIT_DONE);

	if (new_cache) {
		__set_bit(0, cache->seg_map);

		cache->key_head.cache_seg = &cache->segments[0];
		cache->key_head.seg_off = 0;
		cache_pos_copy(&cache->key_tail, &cache->key_head);
		cache_pos_copy(&cache->dirty_tail, &cache->key_head);

		cache_encode_dirty_tail(cache);
		cache_encode_key_tail(cache);
	} else {
		if (cache_decode_key_tail(cache) || cache_decode_dirty_tail(cache)) {
			pcache_dev_err(pcache, "Corrupted key tail or dirty tail.\n");
			return -EIO;
		}
	}

	return 0;
}

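/*
 * Pick the cache segment id for the next segment in the chain. For a new
 * cache an empty segment is allocated and linked after @prev_cache_seg (or
 * recorded in cache_info for the first segment); for an existing cache the
 * id is read from the previous segment's next_seg (or from cache_info).
 */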
static int get_seg_id(struct pcache_cache *cache,
		      struct pcache_cache_segment *prev_cache_seg,
		      bool new_cache, u32 *seg_id)
{
	struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
	struct pcache_cache_dev *cache_dev = cache->cache_dev;
	int ret;

	if (new_cache) {
		ret = cache_dev_get_empty_segment_id(cache_dev, seg_id);
		if (ret) {
			pcache_dev_err(pcache, "no available segment\n");
			goto err;
		}

		if (prev_cache_seg)
			cache_seg_set_next_seg(prev_cache_seg, *seg_id);
		else
			cache_info_set_seg_id(cache, *seg_id);
	} else {
		if (prev_cache_seg) {
			struct pcache_segment_info *prev_seg_info;

			prev_seg_info = &prev_cache_seg->cache_seg_info;
			if (!segment_info_has_next(prev_seg_info)) {
				ret = -EFAULT;
				goto err;
			}
			*seg_id = prev_cache_seg->cache_seg_info.next_seg;
		} else {
			*seg_id = cache->cache_info.seg_id;
		}
	}
	return 0;
err:
	return ret;
}

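/*
 * Walk the cache segment chain and initialize every in-core segment,
 * allocating and linking new segments when the cache is newly created.
 */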
static int cache_segs_init(struct pcache_cache *cache)
{
	struct pcache_cache_segment *prev_cache_seg = NULL;
	struct pcache_cache_info *cache_info = &cache->cache_info;
	bool new_cache = !(cache->cache_info.flags & PCACHE_CACHE_FLAGS_INIT_DONE);
	u32 seg_id;
	int ret;
	u32 i;

	for (i = 0; i < cache_info->n_segs; i++) {
		ret = get_seg_id(cache, prev_cache_seg, new_cache, &seg_id);
		if (ret)
			goto err;

		ret = cache_seg_init(cache, seg_id, i, new_cache);
		if (ret)
			goto err;

		prev_cache_seg = &cache->segments[i];
	}
	return 0;
err:
	return ret;
}

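/*
 * Set up the request key infrastructure: the cache key tree sized after the
 * backing device, @n_paral ksets, per-CPU data heads, and finally a replay
 * of the keys persisted in the ksets on the cache device.
 */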
static int cache_init_req_keys(struct pcache_cache *cache, u32 n_paral)
{
	struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
	u32 n_subtrees;
	int ret;
	u32 i, cpu;

	/* Calculate number of cache trees based on the device size */
	n_subtrees = DIV_ROUND_UP(cache->dev_size << SECTOR_SHIFT, PCACHE_CACHE_SUBTREE_SIZE);
	ret = cache_tree_init(cache, &cache->req_key_tree, n_subtrees);
	if (ret)
		goto err;

	cache->n_ksets = n_paral;
	cache->ksets = kvcalloc(cache->n_ksets, PCACHE_KSET_SIZE, GFP_KERNEL);
	if (!cache->ksets) {
		ret = -ENOMEM;
		goto req_tree_exit;
	}

	/*
	 * Initialize each kset with a spinlock and delayed work for flushing.
	 * Each kset is associated with one queue to ensure independent handling
	 * of cache keys across multiple queues, maximizing multiqueue concurrency.
	 */
	for (i = 0; i < cache->n_ksets; i++) {
		struct pcache_cache_kset *kset = get_kset(cache, i);

		kset->cache = cache;
		spin_lock_init(&kset->kset_lock);
		INIT_DELAYED_WORK(&kset->flush_work, kset_flush_fn);
	}

	cache->data_heads = alloc_percpu(struct pcache_cache_data_head);
	if (!cache->data_heads) {
		ret = -ENOMEM;
		goto free_kset;
	}

	for_each_possible_cpu(cpu) {
		struct pcache_cache_data_head *h =
			per_cpu_ptr(cache->data_heads, cpu);
		h->head_pos.cache_seg = NULL;
	}

	/*
	 * Replay persisted cache keys using cache_replay.
	 * This function loads and replays cache keys from previously stored
	 * ksets, allowing the cache to restore its state after a restart.
	 */
	ret = cache_replay(cache);
	if (ret) {
		pcache_dev_err(pcache, "failed to replay keys\n");
		goto free_heads;
	}

	return 0;

free_heads:
	free_percpu(cache->data_heads);
free_kset:
	kvfree(cache->ksets);
req_tree_exit:
	cache_tree_exit(&cache->req_key_tree);
err:
	return ret;
}

static void cache_destroy_req_keys(struct pcache_cache *cache)
{
	u32 i;

	for (i = 0; i < cache->n_ksets; i++) {
		struct pcache_cache_kset *kset = get_kset(cache, i);

		cancel_delayed_work_sync(&kset->flush_work);
	}

	free_percpu(cache->data_heads);
	kvfree(cache->ksets);
	cache_tree_exit(&cache->req_key_tree);
}

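/*
 * Bring up the cache for a dm-pcache target: initialize in-core state, load
 * or create the on-media metadata (cache_info, segments, tails), rebuild the
 * request key tree, start writeback and mark the cache INIT_DONE before
 * kicking off garbage collection.
 */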
int pcache_cache_start(struct dm_pcache *pcache)
{
	struct pcache_backing_dev *backing_dev = &pcache->backing_dev;
	struct pcache_cache *cache = &pcache->cache;
	struct pcache_cache_options *opts = &pcache->opts;
	int ret;

	ret = cache_init(pcache);
	if (ret)
		return ret;

	cache->cache_info_addr = CACHE_DEV_CACHE_INFO(cache->cache_dev);
	cache->cache_ctrl = CACHE_DEV_CACHE_CTRL(cache->cache_dev);
	backing_dev->cache = cache;
	cache->dev_size = backing_dev->dev_size;

	ret = cache_info_init(cache, opts);
	if (ret)
		goto cache_exit;

	ret = cache_segs_init(cache);
	if (ret)
		goto cache_exit;

	ret = cache_tail_init(cache);
	if (ret)
		goto cache_exit;

	ret = cache_init_req_keys(cache, num_online_cpus());
	if (ret)
		goto cache_exit;

	ret = cache_writeback_init(cache);
	if (ret)
		goto destroy_keys;

	cache->cache_info.flags |= PCACHE_CACHE_FLAGS_INIT_DONE;
	cache_info_write(cache);
	queue_delayed_work(cache_get_wq(cache), &cache->gc_work, 0);

	return 0;

destroy_keys:
	cache_destroy_req_keys(cache);
cache_exit:
	cache_exit(cache);

	return ret;
}

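/*
 * Tear down the cache: flush outstanding keys, stop the GC, clean and
 * writeback work, destroy the request key structures if they were set up,
 * and release the in-core segment state.
 */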
void pcache_cache_stop(struct dm_pcache *pcache)
{
	struct pcache_cache *cache = &pcache->cache;

	pcache_cache_flush(cache);

	cancel_delayed_work_sync(&cache->gc_work);
	flush_work(&cache->clean_work);
	cache_writeback_exit(cache);

	if (cache->req_key_tree.n_subtrees)
		cache_destroy_req_keys(cache);

	cache_exit(cache);
}

struct workqueue_struct *cache_get_wq(struct pcache_cache *cache)
{
	struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);

	return pcache->task_wq;
}

int pcache_cache_init(void)
{
	key_cache = KMEM_CACHE(pcache_cache_key, 0);
	if (!key_cache)
		return -ENOMEM;

	return 0;
}

void pcache_cache_exit(void)
{
	kmem_cache_destroy(key_cache);
}