xref: /linux/drivers/md/dm-pcache/cache_segment.c (revision 4f38da1f027ea2c9f01bb71daa7a299c191b6940)
11d57628fSDongsheng Yang // SPDX-License-Identifier: GPL-2.0-or-later
21d57628fSDongsheng Yang 
31d57628fSDongsheng Yang #include "cache_dev.h"
41d57628fSDongsheng Yang #include "cache.h"
51d57628fSDongsheng Yang #include "backing_dev.h"
61d57628fSDongsheng Yang #include "dm_pcache.h"
71d57628fSDongsheng Yang 
81d57628fSDongsheng Yang static inline struct pcache_segment_info *get_seg_info_addr(struct pcache_cache_segment *cache_seg)
91d57628fSDongsheng Yang {
101d57628fSDongsheng Yang 	struct pcache_segment_info *seg_info_addr;
111d57628fSDongsheng Yang 	u32 seg_id = cache_seg->segment.seg_id;
121d57628fSDongsheng Yang 	void *seg_addr;
131d57628fSDongsheng Yang 
141d57628fSDongsheng Yang 	seg_addr = CACHE_DEV_SEGMENT(cache_seg->cache->cache_dev, seg_id);
151d57628fSDongsheng Yang 	seg_info_addr = seg_addr + PCACHE_SEG_INFO_SIZE * cache_seg->info_index;
161d57628fSDongsheng Yang 
171d57628fSDongsheng Yang 	return seg_info_addr;
181d57628fSDongsheng Yang }
191d57628fSDongsheng Yang 
/*
 * cache_seg_info_write - persist cache_seg_info to its on-media slot
 *
 * Under ->info_lock: bump the header sequence (higher seq marks this copy
 * as the latest), recompute the CRC, flush the whole struct into the
 * currently active slot, then advance ->info_index round-robin so the
 * next update lands in the other slot.  The two-slot ping-pong plus
 * seq/CRC lets a torn write be detected and the previous copy recovered
 * (see pcache_meta_find_latest() on the load path).
 */
static void cache_seg_info_write(struct pcache_cache_segment *cache_seg)
{
	struct pcache_segment_info *seg_info_addr;
	struct pcache_segment_info *seg_info = &cache_seg->cache_seg_info;

	mutex_lock(&cache_seg->info_lock);
	/* new seq must be set before the CRC is computed over the header */
	seg_info->header.seq++;
	seg_info->header.crc = pcache_meta_crc(&seg_info->header, sizeof(struct pcache_segment_info));

	seg_info_addr = get_seg_info_addr(cache_seg);
	memcpy_flushcache(seg_info_addr, seg_info, sizeof(struct pcache_segment_info));
	/* order the flush before any subsequent metadata update */
	pmem_wmb();

	/* ping-pong: next write targets the other slot */
	cache_seg->info_index = (cache_seg->info_index + 1) % PCACHE_META_INDEX_MAX;
	mutex_unlock(&cache_seg->info_lock);
}
361d57628fSDongsheng Yang 
371d57628fSDongsheng Yang static int cache_seg_info_load(struct pcache_cache_segment *cache_seg)
381d57628fSDongsheng Yang {
391d57628fSDongsheng Yang 	struct pcache_segment_info *cache_seg_info_addr_base, *cache_seg_info_addr;
401d57628fSDongsheng Yang 	struct pcache_cache_dev *cache_dev = cache_seg->cache->cache_dev;
411d57628fSDongsheng Yang 	struct dm_pcache *pcache = CACHE_DEV_TO_PCACHE(cache_dev);
421d57628fSDongsheng Yang 	u32 seg_id = cache_seg->segment.seg_id;
431d57628fSDongsheng Yang 	int ret = 0;
441d57628fSDongsheng Yang 
451d57628fSDongsheng Yang 	cache_seg_info_addr_base = CACHE_DEV_SEGMENT(cache_dev, seg_id);
461d57628fSDongsheng Yang 
471d57628fSDongsheng Yang 	mutex_lock(&cache_seg->info_lock);
481d57628fSDongsheng Yang 	cache_seg_info_addr = pcache_meta_find_latest(&cache_seg_info_addr_base->header,
491d57628fSDongsheng Yang 						sizeof(struct pcache_segment_info),
501d57628fSDongsheng Yang 						PCACHE_SEG_INFO_SIZE,
511d57628fSDongsheng Yang 						&cache_seg->cache_seg_info);
521d57628fSDongsheng Yang 	if (IS_ERR(cache_seg_info_addr)) {
531d57628fSDongsheng Yang 		ret = PTR_ERR(cache_seg_info_addr);
541d57628fSDongsheng Yang 		goto out;
551d57628fSDongsheng Yang 	} else if (!cache_seg_info_addr) {
561d57628fSDongsheng Yang 		ret = -EIO;
571d57628fSDongsheng Yang 		goto out;
581d57628fSDongsheng Yang 	}
591d57628fSDongsheng Yang 	cache_seg->info_index = cache_seg_info_addr - cache_seg_info_addr_base;
601d57628fSDongsheng Yang out:
611d57628fSDongsheng Yang 	mutex_unlock(&cache_seg->info_lock);
621d57628fSDongsheng Yang 
631d57628fSDongsheng Yang 	if (ret)
641d57628fSDongsheng Yang 		pcache_dev_err(pcache, "can't read segment info of segment: %u, ret: %d\n",
651d57628fSDongsheng Yang 			      cache_seg->segment.seg_id, ret);
661d57628fSDongsheng Yang 	return ret;
671d57628fSDongsheng Yang }
681d57628fSDongsheng Yang 
691d57628fSDongsheng Yang static int cache_seg_ctrl_load(struct pcache_cache_segment *cache_seg)
701d57628fSDongsheng Yang {
711d57628fSDongsheng Yang 	struct pcache_cache_seg_ctrl *cache_seg_ctrl = cache_seg->cache_seg_ctrl;
721d57628fSDongsheng Yang 	struct pcache_cache_seg_gen cache_seg_gen, *cache_seg_gen_addr;
731d57628fSDongsheng Yang 	int ret = 0;
741d57628fSDongsheng Yang 
751d57628fSDongsheng Yang 	cache_seg_gen_addr = pcache_meta_find_latest(&cache_seg_ctrl->gen->header,
761d57628fSDongsheng Yang 					     sizeof(struct pcache_cache_seg_gen),
771d57628fSDongsheng Yang 					     sizeof(struct pcache_cache_seg_gen),
781d57628fSDongsheng Yang 					     &cache_seg_gen);
791d57628fSDongsheng Yang 	if (IS_ERR(cache_seg_gen_addr)) {
801d57628fSDongsheng Yang 		ret = PTR_ERR(cache_seg_gen_addr);
811d57628fSDongsheng Yang 		goto out;
821d57628fSDongsheng Yang 	}
831d57628fSDongsheng Yang 
841d57628fSDongsheng Yang 	if (!cache_seg_gen_addr) {
851d57628fSDongsheng Yang 		cache_seg->gen = 0;
861d57628fSDongsheng Yang 		cache_seg->gen_seq = 0;
871d57628fSDongsheng Yang 		cache_seg->gen_index = 0;
881d57628fSDongsheng Yang 		goto out;
891d57628fSDongsheng Yang 	}
901d57628fSDongsheng Yang 
911d57628fSDongsheng Yang 	cache_seg->gen = cache_seg_gen.gen;
921d57628fSDongsheng Yang 	cache_seg->gen_seq = cache_seg_gen.header.seq;
931d57628fSDongsheng Yang 	cache_seg->gen_index = (cache_seg_gen_addr - cache_seg_ctrl->gen);
941d57628fSDongsheng Yang out:
951d57628fSDongsheng Yang 
961d57628fSDongsheng Yang 	return ret;
971d57628fSDongsheng Yang }
981d57628fSDongsheng Yang 
991d57628fSDongsheng Yang static inline struct pcache_cache_seg_gen *get_cache_seg_gen_addr(struct pcache_cache_segment *cache_seg)
1001d57628fSDongsheng Yang {
1011d57628fSDongsheng Yang 	struct pcache_cache_seg_ctrl *cache_seg_ctrl = cache_seg->cache_seg_ctrl;
1021d57628fSDongsheng Yang 
1031d57628fSDongsheng Yang 	return (cache_seg_ctrl->gen + cache_seg->gen_index);
1041d57628fSDongsheng Yang }
1051d57628fSDongsheng Yang 
106*1f9ad14aSDongsheng Yang /*
107*1f9ad14aSDongsheng Yang  * cache_seg_ctrl_write - write cache segment control information
108*1f9ad14aSDongsheng Yang  * @seg: the cache segment to update
109*1f9ad14aSDongsheng Yang  *
110*1f9ad14aSDongsheng Yang  * This function writes the control information of a cache segment to media.
111*1f9ad14aSDongsheng Yang  *
112*1f9ad14aSDongsheng Yang  * Although this updates shared control data, we intentionally do not use
113*1f9ad14aSDongsheng Yang  * any locking here.  All accesses to control information are single-threaded:
114*1f9ad14aSDongsheng Yang  *
115*1f9ad14aSDongsheng Yang  *   - All reads occur during the init phase, where no concurrent writes
116*1f9ad14aSDongsheng Yang  *     can happen.
117*1f9ad14aSDongsheng Yang  *   - Writes happen once during init and once when the last reference
118*1f9ad14aSDongsheng Yang  *     to the segment is dropped in cache_seg_put().
119*1f9ad14aSDongsheng Yang  *
120*1f9ad14aSDongsheng Yang  * Both cases are guaranteed to be single-threaded, so there is no risk
121*1f9ad14aSDongsheng Yang  * of concurrent read/write races.
122*1f9ad14aSDongsheng Yang  */
static void cache_seg_ctrl_write(struct pcache_cache_segment *cache_seg)
{
	/* build the record in a stack copy, then flush it in one shot */
	struct pcache_cache_seg_gen cache_seg_gen;

	cache_seg_gen.gen = cache_seg->gen;
	/* new (higher) seq is what marks this slot as the latest copy */
	cache_seg_gen.header.seq = ++cache_seg->gen_seq;
	cache_seg_gen.header.crc = pcache_meta_crc(&cache_seg_gen.header,
						 sizeof(struct pcache_cache_seg_gen));

	memcpy_flushcache(get_cache_seg_gen_addr(cache_seg), &cache_seg_gen, sizeof(struct pcache_cache_seg_gen));
	/* order this flush before any later metadata update */
	pmem_wmb();

	/* ping-pong between PCACHE_META_INDEX_MAX slots for torn-write safety */
	cache_seg->gen_index = (cache_seg->gen_index + 1) % PCACHE_META_INDEX_MAX;
}
1371d57628fSDongsheng Yang 
/*
 * cache_seg_ctrl_init - initialize generation state for a brand-new segment
 *
 * Resets the in-memory generation counters and immediately persists them,
 * so the on-media control area holds a valid gen record from the start.
 */
static void cache_seg_ctrl_init(struct pcache_cache_segment *cache_seg)
{
	cache_seg->gen = 0;
	cache_seg->gen_seq = 0;
	cache_seg->gen_index = 0;
	cache_seg_ctrl_write(cache_seg);
}
1451d57628fSDongsheng Yang 
/*
 * cache_seg_meta_load - load all on-media metadata of an existing segment
 *
 * Loads the segment info first, then the generation control record.
 * Returns 0 on success or the first error encountered.
 */
static int cache_seg_meta_load(struct pcache_cache_segment *cache_seg)
{
	int ret;

	ret = cache_seg_info_load(cache_seg);
	if (ret)
		return ret;

	return cache_seg_ctrl_load(cache_seg);
}
1621d57628fSDongsheng Yang 
1631d57628fSDongsheng Yang /**
1641d57628fSDongsheng Yang  * cache_seg_set_next_seg - Sets the ID of the next segment
1651d57628fSDongsheng Yang  * @cache_seg: Pointer to the cache segment structure.
1661d57628fSDongsheng Yang  * @seg_id: The segment ID to set as the next segment.
1671d57628fSDongsheng Yang  *
1681d57628fSDongsheng Yang  * A pcache_cache allocates multiple cache segments, which are linked together
1691d57628fSDongsheng Yang  * through next_seg. When loading a pcache_cache, the first cache segment can
1701d57628fSDongsheng Yang  * be found using cache->seg_id, which allows access to all the cache segments.
1711d57628fSDongsheng Yang  */
1721d57628fSDongsheng Yang void cache_seg_set_next_seg(struct pcache_cache_segment *cache_seg, u32 seg_id)
1731d57628fSDongsheng Yang {
1741d57628fSDongsheng Yang 	cache_seg->cache_seg_info.flags |= PCACHE_SEG_INFO_FLAGS_HAS_NEXT;
1751d57628fSDongsheng Yang 	cache_seg->cache_seg_info.next_seg = seg_id;
1761d57628fSDongsheng Yang 	cache_seg_info_write(cache_seg);
1771d57628fSDongsheng Yang }
1781d57628fSDongsheng Yang 
1791d57628fSDongsheng Yang int cache_seg_init(struct pcache_cache *cache, u32 seg_id, u32 cache_seg_id,
1801d57628fSDongsheng Yang 		   bool new_cache)
1811d57628fSDongsheng Yang {
1821d57628fSDongsheng Yang 	struct pcache_cache_dev *cache_dev = cache->cache_dev;
1831d57628fSDongsheng Yang 	struct pcache_cache_segment *cache_seg = &cache->segments[cache_seg_id];
1841d57628fSDongsheng Yang 	struct pcache_segment_init_options seg_options = { 0 };
1851d57628fSDongsheng Yang 	struct pcache_segment *segment = &cache_seg->segment;
1861d57628fSDongsheng Yang 	int ret;
1871d57628fSDongsheng Yang 
1881d57628fSDongsheng Yang 	cache_seg->cache = cache;
1891d57628fSDongsheng Yang 	cache_seg->cache_seg_id = cache_seg_id;
1901d57628fSDongsheng Yang 	spin_lock_init(&cache_seg->gen_lock);
1911d57628fSDongsheng Yang 	atomic_set(&cache_seg->refs, 0);
1921d57628fSDongsheng Yang 	mutex_init(&cache_seg->info_lock);
1931d57628fSDongsheng Yang 
1941d57628fSDongsheng Yang 	/* init pcache_segment */
1951d57628fSDongsheng Yang 	seg_options.type = PCACHE_SEGMENT_TYPE_CACHE_DATA;
1961d57628fSDongsheng Yang 	seg_options.data_off = PCACHE_CACHE_SEG_CTRL_OFF + PCACHE_CACHE_SEG_CTRL_SIZE;
1971d57628fSDongsheng Yang 	seg_options.seg_id = seg_id;
1981d57628fSDongsheng Yang 	seg_options.seg_info = &cache_seg->cache_seg_info;
1991d57628fSDongsheng Yang 	pcache_segment_init(cache_dev, segment, &seg_options);
2001d57628fSDongsheng Yang 
2011d57628fSDongsheng Yang 	cache_seg->cache_seg_ctrl = CACHE_DEV_SEGMENT(cache_dev, seg_id) + PCACHE_CACHE_SEG_CTRL_OFF;
2021d57628fSDongsheng Yang 
2031d57628fSDongsheng Yang 	if (new_cache) {
2041d57628fSDongsheng Yang 		cache_dev_zero_range(cache_dev, CACHE_DEV_SEGMENT(cache_dev, seg_id),
2051d57628fSDongsheng Yang 				     PCACHE_SEG_INFO_SIZE * PCACHE_META_INDEX_MAX +
2061d57628fSDongsheng Yang 				     PCACHE_CACHE_SEG_CTRL_SIZE);
2071d57628fSDongsheng Yang 
2081d57628fSDongsheng Yang 		cache_seg_ctrl_init(cache_seg);
2091d57628fSDongsheng Yang 
2101d57628fSDongsheng Yang 		cache_seg->info_index = 0;
2111d57628fSDongsheng Yang 		cache_seg_info_write(cache_seg);
2121d57628fSDongsheng Yang 
2131d57628fSDongsheng Yang 		/* clear outdated kset in segment */
2141d57628fSDongsheng Yang 		memcpy_flushcache(segment->data, &pcache_empty_kset, sizeof(struct pcache_cache_kset_onmedia));
2151d57628fSDongsheng Yang 		pmem_wmb();
2161d57628fSDongsheng Yang 	} else {
2171d57628fSDongsheng Yang 		ret = cache_seg_meta_load(cache_seg);
2181d57628fSDongsheng Yang 		if (ret)
2191d57628fSDongsheng Yang 			goto err;
2201d57628fSDongsheng Yang 	}
2211d57628fSDongsheng Yang 
2221d57628fSDongsheng Yang 	return 0;
2231d57628fSDongsheng Yang err:
2241d57628fSDongsheng Yang 	return ret;
2251d57628fSDongsheng Yang }
2261d57628fSDongsheng Yang 
2271d57628fSDongsheng Yang /**
2281d57628fSDongsheng Yang  * get_cache_segment - Retrieves a free cache segment from the cache.
2291d57628fSDongsheng Yang  * @cache: Pointer to the cache structure.
2301d57628fSDongsheng Yang  *
2311d57628fSDongsheng Yang  * This function attempts to find a free cache segment that can be used.
2321d57628fSDongsheng Yang  * It locks the segment map and checks for the next available segment ID.
2331d57628fSDongsheng Yang  * If a free segment is found, it initializes it and returns a pointer to the
2341d57628fSDongsheng Yang  * cache segment structure. Returns NULL if no segments are available.
2351d57628fSDongsheng Yang  */
2361d57628fSDongsheng Yang struct pcache_cache_segment *get_cache_segment(struct pcache_cache *cache)
2371d57628fSDongsheng Yang {
2381d57628fSDongsheng Yang 	struct pcache_cache_segment *cache_seg;
2391d57628fSDongsheng Yang 	u32 seg_id;
2401d57628fSDongsheng Yang 
2411d57628fSDongsheng Yang 	spin_lock(&cache->seg_map_lock);
2421d57628fSDongsheng Yang again:
2431d57628fSDongsheng Yang 	seg_id = find_next_zero_bit(cache->seg_map, cache->n_segs, cache->last_cache_seg);
2441d57628fSDongsheng Yang 	if (seg_id == cache->n_segs) {
2451d57628fSDongsheng Yang 		/* reset the hint of ->last_cache_seg and retry */
2461d57628fSDongsheng Yang 		if (cache->last_cache_seg) {
2471d57628fSDongsheng Yang 			cache->last_cache_seg = 0;
2481d57628fSDongsheng Yang 			goto again;
2491d57628fSDongsheng Yang 		}
2501d57628fSDongsheng Yang 		cache->cache_full = true;
2511d57628fSDongsheng Yang 		spin_unlock(&cache->seg_map_lock);
2521d57628fSDongsheng Yang 		return NULL;
2531d57628fSDongsheng Yang 	}
2541d57628fSDongsheng Yang 
2551d57628fSDongsheng Yang 	/*
2561d57628fSDongsheng Yang 	 * found an available cache_seg, mark it used in seg_map
2571d57628fSDongsheng Yang 	 * and update the search hint ->last_cache_seg
2581d57628fSDongsheng Yang 	 */
2591d57628fSDongsheng Yang 	__set_bit(seg_id, cache->seg_map);
2601d57628fSDongsheng Yang 	cache->last_cache_seg = seg_id;
2611d57628fSDongsheng Yang 	spin_unlock(&cache->seg_map_lock);
2621d57628fSDongsheng Yang 
2631d57628fSDongsheng Yang 	cache_seg = &cache->segments[seg_id];
2641d57628fSDongsheng Yang 	cache_seg->cache_seg_id = seg_id;
2651d57628fSDongsheng Yang 
2661d57628fSDongsheng Yang 	return cache_seg;
2671d57628fSDongsheng Yang }
2681d57628fSDongsheng Yang 
/*
 * cache_seg_gen_increase - bump the segment generation and persist it
 *
 * Incrementing ->gen under gen_lock invalidates all keys that recorded
 * the old generation; the new value is then written to media.
 */
static void cache_seg_gen_increase(struct pcache_cache_segment *cache_seg)
{
	spin_lock(&cache_seg->gen_lock);
	cache_seg->gen++;
	spin_unlock(&cache_seg->gen_lock);

	/* persist outside gen_lock; see locking notes on cache_seg_ctrl_write() */
	cache_seg_ctrl_write(cache_seg);
}
2771d57628fSDongsheng Yang 
/* Take a reference on the cache segment; paired with cache_seg_put(). */
void cache_seg_get(struct pcache_cache_segment *cache_seg)
{
	atomic_inc(&cache_seg->refs);
}
2821d57628fSDongsheng Yang 
2831d57628fSDongsheng Yang static void cache_seg_invalidate(struct pcache_cache_segment *cache_seg)
2841d57628fSDongsheng Yang {
2851d57628fSDongsheng Yang 	struct pcache_cache *cache;
2861d57628fSDongsheng Yang 
2871d57628fSDongsheng Yang 	cache = cache_seg->cache;
2881d57628fSDongsheng Yang 	cache_seg_gen_increase(cache_seg);
2891d57628fSDongsheng Yang 
2901d57628fSDongsheng Yang 	spin_lock(&cache->seg_map_lock);
2911d57628fSDongsheng Yang 	if (cache->cache_full)
2921d57628fSDongsheng Yang 		cache->cache_full = false;
2931d57628fSDongsheng Yang 	__clear_bit(cache_seg->cache_seg_id, cache->seg_map);
2941d57628fSDongsheng Yang 	spin_unlock(&cache->seg_map_lock);
2951d57628fSDongsheng Yang 
2961d57628fSDongsheng Yang 	pcache_defer_reqs_kick(CACHE_TO_PCACHE(cache));
2971d57628fSDongsheng Yang 	/* clean_work will clean the bad key in key_tree*/
2981d57628fSDongsheng Yang 	queue_work(cache_get_wq(cache), &cache->clean_work);
2991d57628fSDongsheng Yang }
3001d57628fSDongsheng Yang 
3011d57628fSDongsheng Yang void cache_seg_put(struct pcache_cache_segment *cache_seg)
3021d57628fSDongsheng Yang {
3031d57628fSDongsheng Yang 	if (atomic_dec_and_test(&cache_seg->refs))
3041d57628fSDongsheng Yang 		cache_seg_invalidate(cache_seg);
3051d57628fSDongsheng Yang }
306