// SPDX-License-Identifier: GPL-2.0-or-later

#include "cache_dev.h"
#include "cache.h"
#include "backing_dev.h"
#include "dm_pcache.h"

static inline struct pcache_segment_info *get_seg_info_addr(struct pcache_cache_segment *cache_seg)
{
	struct pcache_segment_info *seg_info_addr;
	u32 seg_id = cache_seg->segment.seg_id;
	void *seg_addr;

	seg_addr = CACHE_DEV_SEGMENT(cache_seg->cache->cache_dev, seg_id);
	seg_info_addr = seg_addr + PCACHE_SEG_INFO_SIZE * cache_seg->info_index;

	return seg_info_addr;
}

static void cache_seg_info_write(struct pcache_cache_segment *cache_seg)
{
	struct pcache_segment_info *seg_info_addr;
	struct pcache_segment_info *seg_info = &cache_seg->cache_seg_info;

	mutex_lock(&cache_seg->info_lock);
	seg_info->header.seq++;
	seg_info->header.crc = pcache_meta_crc(&seg_info->header, sizeof(struct pcache_segment_info));

	cache_seg->info_index = (cache_seg->info_index + 1) % PCACHE_META_INDEX_MAX;

	seg_info_addr = get_seg_info_addr(cache_seg);
	memcpy_flushcache(seg_info_addr, seg_info, sizeof(struct pcache_segment_info));
	pmem_wmb();
	mutex_unlock(&cache_seg->info_lock);
}

static int cache_seg_info_load(struct pcache_cache_segment *cache_seg)
{
	struct pcache_segment_info *cache_seg_info_addr_base, *cache_seg_info_addr;
	struct pcache_cache_dev *cache_dev = cache_seg->cache->cache_dev;
	struct dm_pcache *pcache = CACHE_DEV_TO_PCACHE(cache_dev);
	u32 seg_id = cache_seg->segment.seg_id;
	int ret = 0;

	cache_seg_info_addr_base = CACHE_DEV_SEGMENT(cache_dev, seg_id);

	mutex_lock(&cache_seg->info_lock);
	cache_seg_info_addr = pcache_meta_find_latest(&cache_seg_info_addr_base->header,
						      sizeof(struct pcache_segment_info),
						      PCACHE_SEG_INFO_SIZE,
						      &cache_seg->cache_seg_info);
	if (IS_ERR(cache_seg_info_addr)) {
		ret = PTR_ERR(cache_seg_info_addr);
		goto out;
	} else if (!cache_seg_info_addr) {
		ret = -EIO;
		goto out;
	}

	cache_seg->info_index =
		((char *)cache_seg_info_addr - (char *)cache_seg_info_addr_base) /
		PCACHE_SEG_INFO_SIZE;
out:
	mutex_unlock(&cache_seg->info_lock);

	if (ret)
		pcache_dev_err(pcache, "can't read segment info of segment: %u, ret: %d\n",
			       cache_seg->segment.seg_id, ret);
	return ret;
}

static int cache_seg_ctrl_load(struct pcache_cache_segment *cache_seg)
{
	struct pcache_cache_seg_ctrl *cache_seg_ctrl = cache_seg->cache_seg_ctrl;
	struct pcache_cache_seg_gen cache_seg_gen, *cache_seg_gen_addr;
	int ret = 0;

	cache_seg_gen_addr = pcache_meta_find_latest(&cache_seg_ctrl->gen->header,
						     sizeof(struct pcache_cache_seg_gen),
						     sizeof(struct pcache_cache_seg_gen),
						     &cache_seg_gen);
	if (IS_ERR(cache_seg_gen_addr)) {
		ret = PTR_ERR(cache_seg_gen_addr);
		goto out;
	}

	if (!cache_seg_gen_addr) {
		cache_seg->gen = 0;
		cache_seg->gen_seq = 0;
		cache_seg->gen_index = 0;
		goto out;
	}

	cache_seg->gen = cache_seg_gen.gen;
	cache_seg->gen_seq = cache_seg_gen.header.seq;
	cache_seg->gen_index = (cache_seg_gen_addr - cache_seg_ctrl->gen);
out:
	return ret;
}

static inline struct pcache_cache_seg_gen *get_cache_seg_gen_addr(struct pcache_cache_segment *cache_seg)
{
	struct pcache_cache_seg_ctrl *cache_seg_ctrl = cache_seg->cache_seg_ctrl;

	return (cache_seg_ctrl->gen + cache_seg->gen_index);
}
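
/*
 * Descriptive note on the metadata update scheme used by
 * cache_seg_info_write() above and cache_seg_ctrl_write() below:
 * each on-media metadata structure is kept in PCACHE_META_INDEX_MAX slots.
 * An update bumps header.seq, recomputes header.crc, advances the slot
 * index, and writes the whole structure into the new slot with
 * memcpy_flushcache() followed by pmem_wmb(), so an interrupted update
 * leaves the previous slot valid. On load, pcache_meta_find_latest()
 * returns the valid copy with the newest sequence number.
 */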

/*
 * cache_seg_ctrl_write - write cache segment control information
 * @cache_seg: the cache segment to update
 *
 * This function writes the control information of a cache segment to media.
 *
 * Although this updates shared control data, we intentionally do not use
 * any locking here. All accesses to control information are single-threaded:
 *
 * - All reads occur during the init phase, where no concurrent writes
 *   can happen.
 * - Writes happen once during init and once when the last reference
 *   to the segment is dropped in cache_seg_put().
 *
 * Both cases are guaranteed to be single-threaded, so there is no risk
 * of concurrent read/write races.
 */
static void cache_seg_ctrl_write(struct pcache_cache_segment *cache_seg)
{
	struct pcache_cache_seg_gen cache_seg_gen;

	cache_seg_gen.gen = cache_seg->gen;
	cache_seg_gen.header.seq = ++cache_seg->gen_seq;
	cache_seg_gen.header.crc = pcache_meta_crc(&cache_seg_gen.header,
						   sizeof(struct pcache_cache_seg_gen));

	cache_seg->gen_index = (cache_seg->gen_index + 1) % PCACHE_META_INDEX_MAX;

	memcpy_flushcache(get_cache_seg_gen_addr(cache_seg), &cache_seg_gen, sizeof(struct pcache_cache_seg_gen));
	pmem_wmb();
}

static void cache_seg_ctrl_init(struct pcache_cache_segment *cache_seg)
{
	cache_seg->gen = 0;
	cache_seg->gen_seq = 0;
	cache_seg->gen_index = 0;
	cache_seg_ctrl_write(cache_seg);
}

static int cache_seg_meta_load(struct pcache_cache_segment *cache_seg)
{
	int ret;

	ret = cache_seg_info_load(cache_seg);
	if (ret)
		goto err;

	ret = cache_seg_ctrl_load(cache_seg);
	if (ret)
		goto err;

	return 0;
err:
	return ret;
}

/**
 * cache_seg_set_next_seg - Sets the ID of the next segment
 * @cache_seg: Pointer to the cache segment structure.
 * @seg_id: The segment ID to set as the next segment.
 *
 * A pcache_cache allocates multiple cache segments, which are linked together
 * through next_seg. When loading a pcache_cache, the first cache segment can
 * be found using cache->seg_id, which allows access to all the cache segments;
 * see the illustrative walk below.
 */
void cache_seg_set_next_seg(struct pcache_cache_segment *cache_seg, u32 seg_id)
{
	cache_seg->cache_seg_info.flags |= PCACHE_SEG_INFO_FLAGS_HAS_NEXT;
	cache_seg->cache_seg_info.next_seg = seg_id;
	cache_seg_info_write(cache_seg);
}
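
/*
 * Illustrative sketch of how the next_seg chain described above can be
 * walked when a cache is loaded. This shows only the shape of the loop,
 * built from the fields used in this file; it is not the driver's actual
 * load path:
 *
 *	u32 seg_id = cache->seg_id;	// first cache segment of this cache
 *	u32 i;
 *
 *	for (i = 0; i < cache->n_segs; i++) {
 *		struct pcache_cache_segment *seg = &cache->segments[i];
 *
 *		cache_seg_init(cache, seg_id, i, false);
 *		if (!(seg->cache_seg_info.flags & PCACHE_SEG_INFO_FLAGS_HAS_NEXT))
 *			break;
 *		seg_id = seg->cache_seg_info.next_seg;
 *	}
 */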

int cache_seg_init(struct pcache_cache *cache, u32 seg_id, u32 cache_seg_id,
		   bool new_cache)
{
	struct pcache_cache_dev *cache_dev = cache->cache_dev;
	struct pcache_cache_segment *cache_seg = &cache->segments[cache_seg_id];
	struct pcache_segment_init_options seg_options = { 0 };
	struct pcache_segment *segment = &cache_seg->segment;
	int ret;

	cache_seg->cache = cache;
	cache_seg->cache_seg_id = cache_seg_id;
	spin_lock_init(&cache_seg->gen_lock);
	atomic_set(&cache_seg->refs, 0);
	mutex_init(&cache_seg->info_lock);

	/* init pcache_segment */
	seg_options.type = PCACHE_SEGMENT_TYPE_CACHE_DATA;
	seg_options.data_off = PCACHE_CACHE_SEG_CTRL_OFF + PCACHE_CACHE_SEG_CTRL_SIZE;
	seg_options.seg_id = seg_id;
	seg_options.seg_info = &cache_seg->cache_seg_info;
	pcache_segment_init(cache_dev, segment, &seg_options);

	cache_seg->cache_seg_ctrl = CACHE_DEV_SEGMENT(cache_dev, seg_id) + PCACHE_CACHE_SEG_CTRL_OFF;

	if (new_cache) {
		cache_dev_zero_range(cache_dev, CACHE_DEV_SEGMENT(cache_dev, seg_id),
				     PCACHE_SEG_INFO_SIZE * PCACHE_META_INDEX_MAX +
				     PCACHE_CACHE_SEG_CTRL_SIZE);

		cache_seg_ctrl_init(cache_seg);

		cache_seg->info_index = 0;
		cache_seg_info_write(cache_seg);

		/* clear outdated kset in segment */
		memcpy_flushcache(segment->data, &pcache_empty_kset, sizeof(struct pcache_cache_kset_onmedia));
		pmem_wmb();
	} else {
		ret = cache_seg_meta_load(cache_seg);
		if (ret)
			goto err;
	}

	return 0;
err:
	return ret;
}
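
/*
 * Rough on-media view of a cache data segment, as implied by the offsets
 * used in cache_seg_init() above (a sketch, not an authoritative layout):
 *
 *	segment start:			segment info slots
 *					(PCACHE_SEG_INFO_SIZE * PCACHE_META_INDEX_MAX)
 *	PCACHE_CACHE_SEG_CTRL_OFF:	segment control (generation slots)
 *	PCACHE_CACHE_SEG_CTRL_OFF +
 *	PCACHE_CACHE_SEG_CTRL_SIZE:	cached data (seg_options.data_off),
 *					starting with a kset_onmedia header
 */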

/**
 * get_cache_segment - Retrieves a free cache segment from the cache.
 * @cache: Pointer to the cache structure.
 *
 * This function searches the segment map, under seg_map_lock, for the next
 * free segment ID, starting from the ->last_cache_seg hint and wrapping
 * around once if needed. If a free segment is found, it is marked as used
 * in the segment map, the hint is updated, and a pointer to the cache
 * segment structure is returned. Returns NULL (and sets ->cache_full) if
 * no segment is available.
 */
struct pcache_cache_segment *get_cache_segment(struct pcache_cache *cache)
{
	struct pcache_cache_segment *cache_seg;
	u32 seg_id;

	spin_lock(&cache->seg_map_lock);
again:
	seg_id = find_next_zero_bit(cache->seg_map, cache->n_segs, cache->last_cache_seg);
	if (seg_id == cache->n_segs) {
		/* reset the hint of ->last_cache_seg and retry */
		if (cache->last_cache_seg) {
			cache->last_cache_seg = 0;
			goto again;
		}
		cache->cache_full = true;
		spin_unlock(&cache->seg_map_lock);
		return NULL;
	}

	/*
	 * found an available cache_seg, mark it used in seg_map
	 * and update the search hint ->last_cache_seg
	 */
	__set_bit(seg_id, cache->seg_map);
	cache->last_cache_seg = seg_id;
	spin_unlock(&cache->seg_map_lock);

	cache_seg = &cache->segments[seg_id];
	cache_seg->cache_seg_id = seg_id;

	return cache_seg;
}

static void cache_seg_gen_increase(struct pcache_cache_segment *cache_seg)
{
	spin_lock(&cache_seg->gen_lock);
	cache_seg->gen++;
	spin_unlock(&cache_seg->gen_lock);

	cache_seg_ctrl_write(cache_seg);
}

void cache_seg_get(struct pcache_cache_segment *cache_seg)
{
	atomic_inc(&cache_seg->refs);
}

static void cache_seg_invalidate(struct pcache_cache_segment *cache_seg)
{
	struct pcache_cache *cache;

	cache = cache_seg->cache;
	cache_seg_gen_increase(cache_seg);

	spin_lock(&cache->seg_map_lock);
	if (cache->cache_full)
		cache->cache_full = false;
	__clear_bit(cache_seg->cache_seg_id, cache->seg_map);
	spin_unlock(&cache->seg_map_lock);

	pcache_defer_reqs_kick(CACHE_TO_PCACHE(cache));
	/* clean_work will clean the invalid keys in the key_tree */
	queue_work(cache_get_wq(cache), &cache->clean_work);
}

void cache_seg_put(struct pcache_cache_segment *cache_seg)
{
	if (atomic_dec_and_test(&cache_seg->refs))
		cache_seg_invalidate(cache_seg);
}
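
/*
 * Illustrative lifecycle of a cache segment, sketched from the functions
 * above (the expected calling pattern, not a verbatim copy of the driver's
 * write path):
 *
 *	struct pcache_cache_segment *seg;
 *
 *	seg = get_cache_segment(cache);
 *	if (!seg)
 *		return NULL;		// cache is full; ->cache_full is set and
 *					// callers must wait for invalidation
 *
 *	cache_seg_get(seg);		// hold a reference while keys point into it
 *	...
 *	cache_seg_put(seg);		// last put -> cache_seg_invalidate():
 *					// gen is bumped, the seg_map bit is
 *					// cleared and clean_work is queued
 */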