xref: /linux/drivers/md/dm-pcache/cache_gc.c (revision 1d57628ff95b32d5cfa8d8f50e07690c161e9cf0)
// SPDX-License-Identifier: GPL-2.0-or-later
#include "cache.h"
#include "backing_dev.h"
#include "cache_dev.h"
#include "dm_pcache.h"

/**
 * cache_key_gc - Release the segment reference held by a cache key.
 * @cache: Pointer to the pcache_cache structure.
 * @key: Pointer to the cache key to be garbage collected.
 *
 * This function decrements the reference count of the cache segment
 * associated with the given key. If the reference count drops to zero,
 * the segment may be invalidated and reused.
 */
static void cache_key_gc(struct pcache_cache *cache, struct pcache_cache_key *key)
{
	cache_seg_put(key->cache_pos.cache_seg);
}

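/*
 * For reference, a minimal sketch of the put side that cache_key_gc()
 * relies on. cache_seg_put() is defined elsewhere in the driver; the body
 * below and the cache_seg_invalidate() helper are illustrative assumptions
 * only, not the actual implementation:
 *
 *	static void cache_seg_put(struct pcache_cache_segment *cache_seg)
 *	{
 *		// hypothetical: reclaim the segment once the last reference drops
 *		if (refcount_dec_and_test(&cache_seg->refs))
 *			cache_seg_invalidate(cache_seg);
 *	}
 */

/**
 * need_gc - Decide whether garbage collection should proceed.
 * @cache: Pointer to the pcache_cache structure.
 * @dirty_tail: Snapshot of the cache's dirty tail position.
 * @key_tail: Snapshot of the cache's key tail position.
 *
 * Returns true only if key_tail lags behind dirty_tail, the kset at
 * key_tail passes its magic and CRC checks, and segment usage has reached
 * the configured GC threshold. As a side effect, the validated kset is
 * left in cache->gc_kset_onmedia_buf for the caller to consume.
 */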
static bool need_gc(struct pcache_cache *cache, struct pcache_cache_pos *dirty_tail, struct pcache_cache_pos *key_tail)
{
	struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
	struct pcache_cache_kset_onmedia *kset_onmedia;
	void *dirty_addr, *key_addr;
	u32 segs_used, segs_gc_threshold, to_copy;
	int ret;

	dirty_addr = cache_pos_addr(dirty_tail);
	key_addr = cache_pos_addr(key_tail);
	if (dirty_addr == key_addr) {
		pcache_dev_debug(pcache, "key tail is equal to dirty tail: %u:%u\n",
				dirty_tail->cache_seg->cache_seg_id,
				dirty_tail->seg_off);
		return false;
	}

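	/*
	 * Read the kset at key_tail into the GC scratch buffer. The copy is
	 * clipped to the end of the segment, and copy_mc_to_kernel() turns a
	 * machine check on poisoned media into an error return.
	 */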
	kset_onmedia = (struct pcache_cache_kset_onmedia *)cache->gc_kset_onmedia_buf;

	to_copy = min(PCACHE_KSET_ONMEDIA_SIZE_MAX, PCACHE_SEG_SIZE - key_tail->seg_off);
	ret = copy_mc_to_kernel(kset_onmedia, key_addr, to_copy);
	if (ret) {
		pcache_dev_err(pcache, "failed to read kset: %d\n", ret);
		return false;
	}

	/* Check if kset_onmedia is corrupted */
	if (kset_onmedia->magic != PCACHE_KSET_MAGIC) {
		pcache_dev_debug(pcache, "gc error: magic is not as expected. key_tail: %u:%u magic: %llx, expected: %llx\n",
					key_tail->cache_seg->cache_seg_id, key_tail->seg_off,
					kset_onmedia->magic, PCACHE_KSET_MAGIC);
		return false;
	}

	/* Verify the CRC of the kset_onmedia */
	if (kset_onmedia->crc != cache_kset_crc(kset_onmedia)) {
		pcache_dev_debug(pcache, "gc error: crc is not as expected. crc: %x, expected: %x\n",
					cache_kset_crc(kset_onmedia), kset_onmedia->crc);
		return false;
	}

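	/* Only collect once segment usage reaches the configured GC percentage. */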
	segs_used = bitmap_weight(cache->seg_map, cache->n_segs);
	segs_gc_threshold = cache->n_segs * pcache_cache_get_gc_percent(cache) / 100;
	if (segs_used < segs_gc_threshold) {
		pcache_dev_debug(pcache, "segs_used: %u, segs_gc_threshold: %u\n", segs_used, segs_gc_threshold);
		return false;
	}

	return true;
}

/**
 * last_kset_gc - Advance garbage collection past the last kset of a segment.
 * @cache: Pointer to the pcache_cache structure.
 * @kset_onmedia: Pointer to the kset_onmedia structure for the last kset.
 *
 * The last kset of a segment records the id of the segment where the key
 * log continues. Move key_tail to the start of that segment, record the
 * new tail, and release the fully collected segment in the segment map.
 */
static void last_kset_gc(struct pcache_cache *cache, struct pcache_cache_kset_onmedia *kset_onmedia)
{
	struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
	struct pcache_cache_segment *cur_seg, *next_seg;

	cur_seg = cache->key_tail.cache_seg;

	next_seg = &cache->segments[kset_onmedia->next_cache_seg_id];

	mutex_lock(&cache->key_tail_lock);
	cache->key_tail.cache_seg = next_seg;
	cache->key_tail.seg_off = 0;
	cache_encode_key_tail(cache);
	mutex_unlock(&cache->key_tail_lock);

	pcache_dev_debug(pcache, "gc advance kset seg: %u\n", cur_seg->cache_seg_id);

	spin_lock(&cache->seg_map_lock);
	__clear_bit(cur_seg->cache_seg_id, cache->seg_map);
	spin_unlock(&cache->seg_map_lock);
}

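/**
 * pcache_cache_gc_fn - Delayed-work handler driving cache garbage collection.
 * @work: Embedded work_struct of the cache's gc_work.
 *
 * Snapshots dirty_tail and key_tail, and while need_gc() reports a valid
 * kset to collect, drops the segment reference held by each of its keys
 * and advances key_tail past the kset. A kset flagged as last hands off
 * to last_kset_gc() to cross into the next segment. The worker re-arms
 * itself unless the device is stopping or a GC error has been recorded.
 */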
void pcache_cache_gc_fn(struct work_struct *work)
{
	struct pcache_cache *cache = container_of(work, struct pcache_cache, gc_work.work);
	struct dm_pcache *pcache = CACHE_TO_PCACHE(cache);
	struct pcache_cache_pos dirty_tail, key_tail;
	struct pcache_cache_kset_onmedia *kset_onmedia;
	struct pcache_cache_key_onmedia *key_onmedia;
	struct pcache_cache_key *key;
	int ret;
	int i;

	kset_onmedia = (struct pcache_cache_kset_onmedia *)cache->gc_kset_onmedia_buf;

	while (true) {
		if (pcache_is_stopping(pcache) || atomic_read(&cache->gc_errors))
			return;

		/* Get new tail positions */
		mutex_lock(&cache->dirty_tail_lock);
		cache_pos_copy(&dirty_tail, &cache->dirty_tail);
		mutex_unlock(&cache->dirty_tail_lock);

		mutex_lock(&cache->key_tail_lock);
		cache_pos_copy(&key_tail, &cache->key_tail);
		mutex_unlock(&cache->key_tail_lock);

		if (!need_gc(cache, &dirty_tail, &key_tail))
			break;

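		/*
		 * need_gc() validated the kset at key_tail and copied it into
		 * kset_onmedia (the shared gc_kset_onmedia_buf). A kset flagged
		 * LAST is the final one in this segment and records which
		 * segment the key log continues in.
		 */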
		if (kset_onmedia->flags & PCACHE_KSET_FLAGS_LAST) {
			/* Don't move to the next segment if dirty_tail has not moved */
			if (dirty_tail.cache_seg == key_tail.cache_seg)
				break;

			last_kset_gc(cache, kset_onmedia);
			continue;
		}

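		/* Drop the segment reference held by each key in this kset. */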
		for (i = 0; i < kset_onmedia->key_num; i++) {
			struct pcache_cache_key key_tmp = { 0 };

			key_onmedia = &kset_onmedia->data[i];

			key = &key_tmp;
			cache_key_init(&cache->req_key_tree, key);

			ret = cache_key_decode(cache, key_onmedia, key);
			if (ret) {
				/* Return without re-arming the gc work, and
				 * prevent future gc, because a partially
				 * GC-ed kset cannot be retried.
				 */
				atomic_inc(&cache->gc_errors);
				pcache_dev_err(pcache, "failed to decode cache key in gc\n");
				return;
			}

			cache_key_gc(cache, key);
		}

		pcache_dev_debug(pcache, "gc advance: %u:%u %u\n",
			key_tail.cache_seg->cache_seg_id,
			key_tail.seg_off,
			get_kset_onmedia_size(kset_onmedia));

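		/* Every key in this kset is collected; move key_tail past it. */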
		mutex_lock(&cache->key_tail_lock);
		cache_pos_advance(&cache->key_tail, get_kset_onmedia_size(kset_onmedia));
		cache_encode_key_tail(cache);
		mutex_unlock(&cache->key_tail_lock);
	}

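	/* Nothing left to collect for now; re-arm the GC worker. */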
	queue_delayed_work(cache_get_wq(cache), &cache->gc_work, PCACHE_CACHE_GC_INTERVAL);
}