/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2023 Red Hat
 */

#ifndef UDS_SPARSE_CACHE_H
#define UDS_SPARSE_CACHE_H

#include "geometry.h"
#include "indexer.h"

/*
 * The sparse cache is a cache of entire chapter indexes from sparse chapters used for searching
 * for names after all other search paths have failed. It contains only complete chapter indexes;
 * record pages from sparse chapters and single index pages used for resolving hooks are kept in
 * the regular page cache in the volume.
 *
 * The most important property of this cache is the absence of synchronization for read operations.
 * Safe concurrent access to the cache by the zone threads is controlled by the triage queue and
 * the barrier requests it issues to the zone queues. The set of cached chapters does not and must
 * not change between the carefully coordinated calls to uds_update_sparse_cache() from the zone
 * threads. Outside of updates, every zone will get the same result when calling
 * uds_sparse_cache_contains() as every other zone.
 */
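
/*
 * Illustrative sketch, not part of this interface: one way a zone thread
 * might combine these entry points when a request needs a sparse chapter.
 * The cache, zone, and zone_number names and the error handling are
 * hypothetical simplifications; in practice the update calls are
 * coordinated through the barrier requests described above, so every zone
 * thread updates in lockstep and no zone reads during an update.
 *
 *	if (!uds_sparse_cache_contains(cache, virtual_chapter, zone_number)) {
 *		result = uds_update_sparse_cache(zone, virtual_chapter);
 *		if (result != UDS_SUCCESS)
 *			return result;
 *	}
 */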

struct index_zone;
struct sparse_cache;

int __must_check uds_make_sparse_cache(const struct index_geometry *geometry,
				       unsigned int capacity, unsigned int zone_count,
				       struct sparse_cache **cache_ptr);

void uds_free_sparse_cache(struct sparse_cache *cache);
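
/*
 * A minimal creation/teardown sketch (hypothetical caller, most error
 * handling elided), assuming capacity is the number of chapter indexes the
 * cache can hold and zone_count matches the number of zone threads that
 * will read the cache without locking:
 *
 *	struct sparse_cache *cache;
 *	int result;
 *
 *	result = uds_make_sparse_cache(geometry, capacity, zone_count, &cache);
 *	if (result != UDS_SUCCESS)
 *		return result;
 *	...
 *	uds_free_sparse_cache(cache);
 */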

bool uds_sparse_cache_contains(struct sparse_cache *cache, u64 virtual_chapter,
			       unsigned int zone_number);

int __must_check uds_update_sparse_cache(struct index_zone *zone, u64 virtual_chapter);

void uds_invalidate_sparse_cache(struct sparse_cache *cache);

int __must_check uds_search_sparse_cache(struct index_zone *zone,
					 const struct uds_record_name *name,
					 u64 *virtual_chapter_ptr, u16 *record_page_ptr);
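
/*
 * Illustrative lookup sketch (hypothetical caller): uds_search_sparse_cache()
 * looks up a record name in the cached chapter indexes; on success the
 * values stored through virtual_chapter_ptr and record_page_ptr identify
 * where the record may be found. The record page itself is then read
 * through the volume's regular page cache, not from this cache. The zone,
 * name, and requested_chapter variables are hypothetical stand-ins for
 * request state.
 *
 *	u64 virtual_chapter = requested_chapter;
 *	u16 record_page;
 *	int result;
 *
 *	result = uds_search_sparse_cache(zone, name, &virtual_chapter, &record_page);
 *	if (result != UDS_SUCCESS)
 *		return result;
 */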

#endif /* UDS_SPARSE_CACHE_H */