xref: /linux/drivers/md/dm-cache-metadata.h (revision 9ad8d22f2f3fad7a366c9772362795ef6d6a2d51)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright (C) 2012 Red Hat, Inc.
4  *
5  * This file is released under the GPL.
6  */
7 
8 #ifndef DM_CACHE_METADATA_H
9 #define DM_CACHE_METADATA_H
10 
11 #include "dm-cache-block-types.h"
12 #include "dm-cache-policy-internal.h"
13 #include "persistent-data/dm-space-map-metadata.h"
14 
15 /*----------------------------------------------------------------*/
16 
17 #define DM_CACHE_METADATA_BLOCK_SIZE DM_SM_METADATA_BLOCK_SIZE
18 
19 /* FIXME: remove this restriction */
20 /*
21  * The metadata device is currently limited in size.
22  */
23 #define DM_CACHE_METADATA_MAX_SECTORS DM_SM_METADATA_MAX_SECTORS
24 
25 /*
26  * A metadata device larger than 16GB triggers a warning.
27  */
28 #define DM_CACHE_METADATA_MAX_SECTORS_WARNING (16 * (1024 * 1024 * 1024 >> SECTOR_SHIFT))
29 
30 /*----------------------------------------------------------------*/
31 
32 /*
33  * Ext[234]-style compat feature flags.
34  *
35  * A new feature which old metadata will still be compatible with should
36  * define a DM_CACHE_FEATURE_COMPAT_* flag (rarely useful).
37  *
38  * A new feature that is not compatible with old code should define a
39  * DM_CACHE_FEATURE_INCOMPAT_* flag and guard the relevant code with
40  * that flag.
41  *
42  * A new feature that is not compatible with old code accessing the
43  * metadata RDWR should define a DM_CACHE_FEATURE_RO_COMPAT_* flag and
44  * guard the relevant code with that flag.
45  *
46  * As these various flags are defined they should be added to the
47  * following masks.
48  */
49 
50 #define DM_CACHE_FEATURE_COMPAT_SUPP	  0UL
51 #define DM_CACHE_FEATURE_COMPAT_RO_SUPP	  0UL
52 #define DM_CACHE_FEATURE_INCOMPAT_SUPP	  0UL
53 
54 struct dm_cache_metadata;
55 
56 /*
57  * Reopens or creates a new, empty metadata volume.  Returns an ERR_PTR on
58  * failure.  If reopening then features must match.
59  */
60 struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
61 						 sector_t data_block_size,
62 						 bool may_format_device,
63 						 size_t policy_hint_size,
64 						 unsigned int metadata_version);
65 
/* Releases the handle returned by dm_cache_metadata_open(). */
66 void dm_cache_metadata_close(struct dm_cache_metadata *cmd);
67 
68 /*
69  * The metadata needs to know how many cache blocks there are.  We don't
70  * care about the origin, assuming the core target is giving us valid
71  * origin blocks to map to.
72  */
73 int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size);
74 
/*
 * Resizes the stored discard bitset to hold new_nr_entries entries, each
 * entry covering discard_block_size sectors.
 */
75 int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
76 				   sector_t discard_block_size,
77 				   dm_dblock_t new_nr_entries);
78 
/*
 * Iterates over the stored discard state, invoking fn once per discard
 * block with the caller-supplied context.
 */
79 typedef int (*load_discard_fn)(void *context, sector_t discard_block_size,
80 			       dm_dblock_t dblock, bool discarded);
81 int dm_cache_load_discards(struct dm_cache_metadata *cmd,
82 			   load_discard_fn fn, void *context);
83 
/* Sets or clears the discard flag for a single discard block. */
84 int dm_cache_set_discard(struct dm_cache_metadata *cmd, dm_dblock_t dblock, bool discard);
85 
/* Removes the mapping held by the given cache block. */
86 int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock);
/* Records that cache block cblock now maps origin block oblock. */
87 int dm_cache_insert_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock, dm_oblock_t oblock);
/* Predicate: has the metadata changed within the current transaction? */
88 int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd);
89 
/*
 * Iterates over all stored mappings, invoking fn once per mapping with
 * its dirty state and any saved policy hint (hint_valid indicates
 * whether a hint was stored for that cblock).
 */
90 typedef int (*load_mapping_fn)(void *context, dm_oblock_t oblock,
91 			       dm_cblock_t cblock, bool dirty,
92 			       uint32_t hint, bool hint_valid);
93 int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
94 			   struct dm_cache_policy *policy,
95 			   load_mapping_fn fn,
96 			   void *context);
97 
/* Bulk update of the dirty state from an in-core bitset of nr_bits bits. */
98 int dm_cache_set_dirty_bits(struct dm_cache_metadata *cmd,
99 			    unsigned int nr_bits, unsigned long *bits);
100 
/*
 * Hit/miss counters exchanged with the metadata via
 * dm_cache_metadata_get_stats() / dm_cache_metadata_set_stats().
 */
101 struct dm_cache_statistics {
102 	uint32_t read_hits;	/* reads satisfied by the cache */
103 	uint32_t read_misses;	/* reads that missed the cache */
104 	uint32_t write_hits;	/* writes that hit the cache */
105 	uint32_t write_misses;	/* writes that missed the cache */
106 };
107 
/* Copies the current hit/miss statistics into *stats. */
108 void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
109 				 struct dm_cache_statistics *stats);
110 
111 /*
112  * 'void' because it's no big deal if it fails.
113  */
114 void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
115 				 struct dm_cache_statistics *stats);
116 
/*
 * Commits the current metadata transaction.  clean_shutdown records
 * whether this commit is part of an orderly shutdown (NOTE(review):
 * presumably influencing whether dirty state must be replayed on the
 * next open — confirm against the implementation).
 */
117 int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown);
118 
/* Returns, via *result, the number of free blocks on the metadata device. */
119 int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
120 					   dm_block_t *result);
121 
/* Returns, via *result, the total size of the metadata device in blocks. */
122 int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
123 				   dm_block_t *result);
124 
125 /*
126  * The policy is invited to save a 32bit hint value for every cblock (eg,
127  * for a hit count).  These are stored against the policy name.  If
128  * policies are changed, then hints will be lost.  If the machine crashes,
129  * hints will be lost.
130  *
131  * The hints are indexed by cblock, but many policies will not
132  * necessarily have a fast way of looking their entries up by cblock.  So
133  * rather than querying the policy for each cblock, we let it walk its data
134  * structures and fill in the hints in whatever order it wishes.
135  */
136 int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *p);
137 
138 /*
139  * Query method.  Are all the blocks in the cache clean?
140  */
141 int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result);
142 
/* Query/set the flag indicating the metadata needs checking. */
143 int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result);
144 int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd);
/* Switches the metadata between read-only and read-write modes. */
145 void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd);
146 void dm_cache_metadata_set_read_write(struct dm_cache_metadata *cmd);
/* Abandons the current transaction's uncommitted changes. */
147 int dm_cache_metadata_abort(struct dm_cache_metadata *cmd);
148 
149 /*----------------------------------------------------------------*/
150 
151 #endif /* DM_CACHE_METADATA_H */
152