xref: /linux/drivers/md/dm-pcache/cache_gc.c (revision 55a42f78ffd386e01a5404419f8c5ded7db70a21)
// SPDX-License-Identifier: GPL-2.0-or-later
#include "cache.h"
#include "backing_dev.h"
#include "cache_dev.h"
#include "dm_pcache.h"

/**
 * cache_key_gc - Releases the reference of a cache key segment.
 * @cache: Pointer to the pcache_cache structure.
 * @key: Pointer to the cache key to be garbage collected.
 *
 * This function decrements the reference count of the cache segment
 * associated with the given key. If the reference count drops to zero,
 * the segment may be invalidated and reused.
 */
static void cache_key_gc(struct pcache_cache *cache, struct pcache_cache_key *key)
{
	cache_seg_put(key->cache_pos.cache_seg);
}

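/**
 * need_gc - Check whether garbage collection should proceed.
 * @cache: Pointer to the pcache_cache structure.
 * @dirty_tail: Snapshot of the current dirty tail position.
 * @key_tail: Snapshot of the current key tail position.
 *
 * GC proceeds only when key_tail lags behind dirty_tail, the kset at
 * key_tail passes its magic and CRC checks, and the number of used
 * segments has reached the configured GC threshold.
 */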
static bool need_gc(struct pcache_cache *cache, struct pcache_cache_pos *dirty_tail,
		    struct pcache_cache_pos *key_tail)
{
	struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
	struct pcache_cache_kset_onmedia *kset_onmedia;
	void *dirty_addr, *key_addr;
	u32 segs_used, segs_gc_threshold, to_copy;
	int ret;

	dirty_addr = cache_pos_addr(dirty_tail);
	key_addr = cache_pos_addr(key_tail);
	if (dirty_addr == key_addr) {
		pcache_dev_debug(pcache, "key tail is equal to dirty tail: %u:%u\n",
				dirty_tail->cache_seg->cache_seg_id,
				dirty_tail->seg_off);
		return false;
	}

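	/* The per-cache GC scratch buffer holds the kset read from key_tail. */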
	kset_onmedia = (struct pcache_cache_kset_onmedia *)cache->gc_kset_onmedia_buf;

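	/*
	 * Read at most one full kset, without crossing the end of the
	 * segment. copy_mc_to_kernel() is a machine-check-safe copy, so a
	 * poisoned page in the cache device is reported as an error
	 * instead of crashing the kernel.
	 */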
	to_copy = min(PCACHE_KSET_ONMEDIA_SIZE_MAX, PCACHE_SEG_SIZE - key_tail->seg_off);
	ret = copy_mc_to_kernel(kset_onmedia, key_addr, to_copy);
	if (ret) {
		pcache_dev_err(pcache, "failed to read kset: %d\n", ret);
		return false;
	}

	/* Check if kset_onmedia is corrupted */
	if (kset_onmedia->magic != PCACHE_KSET_MAGIC) {
		pcache_dev_debug(pcache, "gc error: magic is not as expected. key_tail: %u:%u magic: %llx, expected: %llx\n",
					key_tail->cache_seg->cache_seg_id, key_tail->seg_off,
					kset_onmedia->magic, PCACHE_KSET_MAGIC);
		return false;
	}

	/* Verify the CRC of the kset_onmedia */
	if (kset_onmedia->crc != cache_kset_crc(kset_onmedia)) {
		pcache_dev_debug(pcache, "gc error: crc is not as expected. crc: %x, expected: %x\n",
					cache_kset_crc(kset_onmedia), kset_onmedia->crc);
		return false;
	}

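	/*
	 * Even when there are ksets to reclaim, only start GC once the
	 * number of in-use segments reaches the configured GC percentage
	 * of all cache segments.
	 */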
	segs_used = bitmap_weight(cache->seg_map, cache->n_segs);
	segs_gc_threshold = cache->n_segs * pcache_cache_get_gc_percent(cache) / 100;
	if (segs_used < segs_gc_threshold) {
		pcache_dev_debug(pcache, "segs_used: %u, segs_gc_threshold: %u\n", segs_used, segs_gc_threshold);
		return false;
	}

	return true;
}

/**
 * last_kset_gc - Advances the garbage collection for the last kset.
 * @cache: Pointer to the pcache_cache structure.
 * @kset_onmedia: Pointer to the kset_onmedia structure for the last kset.
 *
 * The last kset of a segment records the id of the segment that follows
 * it. Move key_tail to the start of that next segment and release the
 * current key_tail segment for reuse.
 */
static void last_kset_gc(struct pcache_cache *cache, struct pcache_cache_kset_onmedia *kset_onmedia)
{
	struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
	struct pcache_cache_segment *cur_seg, *next_seg;

	cur_seg = cache->key_tail.cache_seg;

	next_seg = &cache->segments[kset_onmedia->next_cache_seg_id];

	mutex_lock(&cache->key_tail_lock);
	cache->key_tail.cache_seg = next_seg;
	cache->key_tail.seg_off = 0;
	cache_encode_key_tail(cache);
	mutex_unlock(&cache->key_tail_lock);

	pcache_dev_debug(pcache, "gc advance kset seg: %u\n", cur_seg->cache_seg_id);

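	/*
	 * The old key_tail segment no longer holds any live kset data;
	 * clear its bit in seg_map so the segment can be reused.
	 */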
	spin_lock(&cache->seg_map_lock);
	__clear_bit(cur_seg->cache_seg_id, cache->seg_map);
	spin_unlock(&cache->seg_map_lock);
}

void pcache_cache_gc_fn(struct work_struct *work)
{
	struct pcache_cache *cache = container_of(work, struct pcache_cache, gc_work.work);
	struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
	struct pcache_cache_pos dirty_tail, key_tail;
	struct pcache_cache_kset_onmedia *kset_onmedia;
	struct pcache_cache_key_onmedia *key_onmedia;
	struct pcache_cache_key *key;
	int ret;
	int i;

	kset_onmedia = (struct pcache_cache_kset_onmedia *)cache->gc_kset_onmedia_buf;

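	/*
	 * Process one kset per iteration: snapshot the tail positions,
	 * check whether GC should run, then release the segment references
	 * held by the keys in the kset at key_tail. Stop early if the
	 * device is being torn down or a previous GC error was recorded.
	 */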
	while (true) {
		if (pcache_is_stopping(pcache) || atomic_read(&cache->gc_errors))
			return;

		/* Get new tail positions */
		mutex_lock(&cache->dirty_tail_lock);
		cache_pos_copy(&dirty_tail, &cache->dirty_tail);
		mutex_unlock(&cache->dirty_tail_lock);

		mutex_lock(&cache->key_tail_lock);
		cache_pos_copy(&key_tail, &cache->key_tail);
		mutex_unlock(&cache->key_tail_lock);

		if (!need_gc(cache, &dirty_tail, &key_tail))
			break;

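		/*
		 * A kset flagged PCACHE_KSET_FLAGS_LAST marks the end of the
		 * current segment and records which segment to continue from.
		 */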
		if (kset_onmedia->flags & PCACHE_KSET_FLAGS_LAST) {
			/* Don't move to the next segment if dirty_tail has not moved */
			if (dirty_tail.cache_seg == key_tail.cache_seg)
				break;

			last_kset_gc(cache, kset_onmedia);
			continue;
		}

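		/* Drop the segment reference held by each key in this kset. */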
		for (i = 0; i < kset_onmedia->key_num; i++) {
			struct pcache_cache_key key_tmp = { 0 };

			key_onmedia = &kset_onmedia->data[i];

			key = &key_tmp;
			cache_key_init(&cache->req_key_tree, key);

			ret = cache_key_decode(cache, key_onmedia, key);
			if (ret) {
				/*
				 * Return without re-arming the gc work, and
				 * prevent future gc, because we cannot retry a
				 * partially GC-ed kset.
				 */
				atomic_inc(&cache->gc_errors);
				pcache_dev_err(pcache, "failed to decode cache key in gc\n");
				return;
			}

			cache_key_gc(cache, key);
		}

		pcache_dev_debug(pcache, "gc advance: %u:%u %u\n",
			key_tail.cache_seg->cache_seg_id,
			key_tail.seg_off,
			get_kset_onmedia_size(kset_onmedia));

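		/*
		 * All keys in this kset have been GC-ed; advance key_tail
		 * past it and persist the new position.
		 */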
		mutex_lock(&cache->key_tail_lock);
		cache_pos_advance(&cache->key_tail, get_kset_onmedia_size(kset_onmedia));
		cache_encode_key_tail(cache);
		mutex_unlock(&cache->key_tail_lock);
	}

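	/* Re-arm the GC work so it runs again after the GC interval. */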
	queue_delayed_work(cache_get_wq(cache), &cache->gc_work, PCACHE_CACHE_GC_INTERVAL);
}