/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Red Hat, Inc.
 *
 * This file is released under the GPL.
 */

#ifndef DM_CACHE_METADATA_H
#define DM_CACHE_METADATA_H

#include "dm-cache-block-types.h"
#include "dm-cache-policy-internal.h"
#include "persistent-data/dm-space-map-metadata.h"

/*----------------------------------------------------------------*/

#define DM_CACHE_METADATA_BLOCK_SIZE DM_SM_METADATA_BLOCK_SIZE

/* FIXME: remove this restriction */
/*
 * The metadata device is currently limited in size.
 */
#define DM_CACHE_METADATA_MAX_SECTORS DM_SM_METADATA_MAX_SECTORS

/*
 * A metadata device larger than 16GB triggers a warning.
 */
#define DM_CACHE_METADATA_MAX_SECTORS_WARNING (16 * (1024 * 1024 * 1024 >> SECTOR_SHIFT))
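
/*
 * For reference: with 512-byte sectors (SECTOR_SHIFT == 9) the expression
 * above works out to 16 * 2097152 = 33554432 sectors, i.e. 16 GiB.
 */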

/*----------------------------------------------------------------*/

/*
 * Ext[234]-style compat feature flags.
 *
 * A new feature which old metadata will still be compatible with should
 * define a DM_CACHE_FEATURE_COMPAT_* flag (rarely useful).
 *
 * A new feature that is not compatible with old code should define a
 * DM_CACHE_FEATURE_INCOMPAT_* flag and guard the relevant code with
 * that flag.
 *
 * A new feature that is not compatible with old code accessing the
 * metadata read-write should define a DM_CACHE_FEATURE_COMPAT_RO_* flag
 * and guard the relevant code with that flag.
 *
 * As these various flags are defined, they should be added to the
 * following masks.
 */

#define DM_CACHE_FEATURE_COMPAT_SUPP	  0UL
#define DM_CACHE_FEATURE_COMPAT_RO_SUPP	  0UL
#define DM_CACHE_FEATURE_INCOMPAT_SUPP	  0UL
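
/*
 * Illustrative sketch only (not a copy of the real implementation): when
 * the superblock is read, any incompat feature bit that is not covered by
 * DM_CACHE_FEATURE_INCOMPAT_SUPP should cause the open to fail, roughly:
 *
 *	if (incompat_flags & ~DM_CACHE_FEATURE_INCOMPAT_SUPP)
 *		return -EINVAL;
 *
 * 'incompat_flags' is just a stand-in name for the feature word read from
 * the superblock.
 */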

struct dm_cache_metadata;

/*
 * Reopens or creates a new, empty metadata volume.  Returns an ERR_PTR on
 * failure.  If reopening, the features must match.
 */
struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
						 sector_t data_block_size,
						 bool may_format_device,
						 size_t policy_hint_size,
						 unsigned int metadata_version);

void dm_cache_metadata_close(struct dm_cache_metadata *cmd);
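
/*
 * Example usage (illustrative sketch; the values passed for the data
 * block size, hint size and metadata version are made up for the example):
 *
 *	struct dm_cache_metadata *cmd;
 *
 *	cmd = dm_cache_metadata_open(bdev, 128, true, 4, 2);
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);
 *	...
 *	dm_cache_metadata_close(cmd);
 */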

/*
 * The metadata needs to know how many cache blocks there are.  We don't
 * care about the origin, assuming the core target is giving us valid
 * origin blocks to map to.
 */
int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size);
int dm_cache_size(struct dm_cache_metadata *cmd, dm_cblock_t *result);
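
/*
 * Sketch of growing the cache when the fast device gets bigger (the
 * surrounding names are illustrative; to_cblock() is the conversion
 * helper from dm-cache-block-types.h):
 *
 *	int r = dm_cache_resize(cmd, to_cblock(new_nr_cache_blocks));
 *	if (r)
 *		return r;
 */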

int dm_cache_discard_bitset_resize(struct dm_cache_metadata *cmd,
				   sector_t discard_block_size,
				   dm_dblock_t new_nr_entries);

typedef int (*load_discard_fn)(void *context, sector_t discard_block_size,
			       dm_dblock_t dblock, bool discarded);
int dm_cache_load_discards(struct dm_cache_metadata *cmd,
			   load_discard_fn fn, void *context);
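
/*
 * A load_discard_fn callback might look roughly like this (sketch; the
 * context structure and its bitset are hypothetical):
 *
 *	static int load_discard(void *context, sector_t discard_block_size,
 *				dm_dblock_t dblock, bool discarded)
 *	{
 *		struct my_context *ctx = context;
 *
 *		if (discarded)
 *			set_bit(from_dblock(dblock), ctx->discard_bitset);
 *		return 0;
 *	}
 *
 *	r = dm_cache_load_discards(cmd, load_discard, ctx);
 */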

int dm_cache_set_discard(struct dm_cache_metadata *cmd, dm_dblock_t dblock, bool discard);

int dm_cache_remove_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock);
int dm_cache_insert_mapping(struct dm_cache_metadata *cmd, dm_cblock_t cblock, dm_oblock_t oblock);
int dm_cache_changed_this_transaction(struct dm_cache_metadata *cmd);

typedef int (*load_mapping_fn)(void *context, dm_oblock_t oblock,
			       dm_cblock_t cblock, bool dirty,
			       uint32_t hint, bool hint_valid);
int dm_cache_load_mappings(struct dm_cache_metadata *cmd,
			   struct dm_cache_policy *policy,
			   load_mapping_fn fn,
			   void *context);
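
/*
 * A load_mapping_fn implementation typically hands each mapping back to
 * the core target and its policy, e.g. (sketch; remember_mapping() and
 * seed_policy_hint() are hypothetical helpers, as is struct my_cache):
 *
 *	static int load_mapping(void *context, dm_oblock_t oblock,
 *				dm_cblock_t cblock, bool dirty,
 *				uint32_t hint, bool hint_valid)
 *	{
 *		struct my_cache *cache = context;
 *
 *		remember_mapping(cache, oblock, cblock, dirty);
 *		if (hint_valid)
 *			seed_policy_hint(cache, cblock, hint);
 *		return 0;
 *	}
 *
 *	r = dm_cache_load_mappings(cmd, policy, load_mapping, cache);
 */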

int dm_cache_set_dirty_bits(struct dm_cache_metadata *cmd,
			    unsigned int nr_bits, unsigned long *bits);

struct dm_cache_statistics {
	uint32_t read_hits;
	uint32_t read_misses;
	uint32_t write_hits;
	uint32_t write_misses;
};

void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd,
				 struct dm_cache_statistics *stats);

/*
 * 'void' because it's no big deal if it fails.
 */
void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd,
				 struct dm_cache_statistics *stats);
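
/*
 * Typical round trip (sketch): read the persisted counters when the cache
 * comes up, maintain them in core, and write them back around commit time:
 *
 *	struct dm_cache_statistics stats;
 *
 *	dm_cache_metadata_get_stats(cmd, &stats);
 *	stats.read_hits++;
 *	dm_cache_metadata_set_stats(cmd, &stats);
 */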

int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown);
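
/*
 * Example (sketch): pass clean_shutdown = true when committing as part of
 * an orderly suspend, false for a routine commit while the device is live:
 *
 *	r = dm_cache_commit(cmd, clean_shutdown);
 *	if (r)
 *		DMERR("could not commit metadata");
 */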

int dm_cache_get_free_metadata_block_count(struct dm_cache_metadata *cmd,
					   dm_block_t *result);

int dm_cache_get_metadata_dev_size(struct dm_cache_metadata *cmd,
				   dm_block_t *result);

void dm_cache_dump(struct dm_cache_metadata *cmd);

/*
 * The policy is invited to save a 32-bit hint value for every cblock (e.g.
 * a hit count).  These are stored against the policy name.  If the policy
 * is changed, the hints will be lost.  The hints are also lost if the
 * machine crashes.
 *
 * The hints are indexed by cblock, but many policies will not have a fast
 * way of looking them up by cblock.  So rather than querying the policy
 * for each cblock, we let it walk its own data structures and fill in the
 * hints in whatever order it wishes.
 */
int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *p);
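
/*
 * Example (sketch): typically called as part of a clean shutdown, just
 * before the final commit, so the hints on disk match the policy's
 * in-core state:
 *
 *	r = dm_cache_write_hints(cmd, policy);
 *	if (r)
 *		DMERR("could not write hints");
 */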

/*
 * Query method.  Are all the blocks in the cache clean?
 */
int dm_cache_metadata_all_clean(struct dm_cache_metadata *cmd, bool *result);

int dm_cache_metadata_needs_check(struct dm_cache_metadata *cmd, bool *result);
int dm_cache_metadata_set_needs_check(struct dm_cache_metadata *cmd);
void dm_cache_metadata_set_read_only(struct dm_cache_metadata *cmd);
void dm_cache_metadata_set_read_write(struct dm_cache_metadata *cmd);
int dm_cache_metadata_abort(struct dm_cache_metadata *cmd);
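
/*
 * Sketch of one way a caller might react when a metadata update fails:
 * flag the device for an offline check and drop to read-only mode so no
 * further damage is done (this is illustrative, not a transcription of
 * what dm-cache-target does):
 *
 *	if (r) {
 *		DMERR("metadata operation failed");
 *		dm_cache_metadata_set_needs_check(cmd);
 *		dm_cache_metadata_set_read_only(cmd);
 *	}
 */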

/*----------------------------------------------------------------*/

#endif /* DM_CACHE_METADATA_H */