/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#ifndef DM_CACHE_POLICY_H
#define DM_CACHE_POLICY_H

#include "dm-cache-block-types.h"

#include <linux/device-mapper.h>

/*----------------------------------------------------------------*/

/*
 * FIXME: make it clear which methods are optional.  Get debug policy to
 * double check this at start.
 */

/*
 * The cache policy makes the important decisions about which blocks get to
 * live on the faster cache device.
 *
 * When the core target has to remap a bio it calls the 'map' method of the
 * policy.  This returns an instruction telling the core target what to do.
 *
 * POLICY_HIT:
 *   That block is in the cache.  Remap to the cache and carry on.
 *
 * POLICY_MISS:
 *   This block is on the origin device.  Remap and carry on.
 *
 * POLICY_NEW:
 *   This block is currently on the origin device, but the policy wants to
 *   move it.  The core should:
 *
 *   - hold any further io to this origin block
 *   - copy the origin to the given cache block
 *   - release all the held blocks
 *   - remap the original block to the cache
 *
 * POLICY_REPLACE:
 *   This block is currently on the origin device.  The policy wants to
 *   move it to the cache, with the added complication that the destination
 *   cache block needs a writeback first.  The core should:
 *
 *   - hold any further io to this origin block
 *   - hold any further io to the origin block that's being written back
 *   - writeback
 *   - copy new block to cache
 *   - release held blocks
 *   - remap bio to cache and reissue.
 *
 * Should the core run into trouble while processing a POLICY_NEW or
 * POLICY_REPLACE instruction it will roll back the policies mapping using
 * remove_mapping() or force_mapping().  These methods must not fail.  This
 * approach avoids having transactional semantics in the policy (ie, the
 * core informing the policy when a migration is complete), and hence makes
 * it easier to write new policies.
 *
 * In general policy methods should never block, except in the case of the
 * map function when can_migrate is set.  So be careful to implement using
 * bounded, preallocated memory.
 */
enum policy_operation {
	POLICY_HIT,	/* block is in the cache; remap and carry on */
	POLICY_MISS,	/* block is on the origin device; remap and carry on */
	POLICY_NEW,	/* promote block to a free cache block (see above) */
	POLICY_REPLACE	/* promote block, writing back the current occupant first */
};

/*
 * This is the instruction passed back to the core target.
 *
 * Fields are only meaningful for the operations noted beside them; for
 * other operations their contents are undefined.
 */
struct policy_result {
	enum policy_operation op;
	dm_oblock_t old_oblock;	/* POLICY_REPLACE: origin block being demoted */
	dm_cblock_t cblock;	/* POLICY_HIT, POLICY_NEW, POLICY_REPLACE */
};

/*
 * Callback for the walk_mappings() method below.  Invoked once per cached
 * block as fn(context, cblock, oblock, hint), where context is the opaque
 * pointer passed to walk_mappings().  (NOTE(review): a non-zero return
 * presumably aborts the walk and is propagated -- confirm with callers.)
 */
typedef int (*policy_walk_fn)(void *context, dm_cblock_t cblock,
			      dm_oblock_t oblock, uint32_t hint);

/*
 * The cache policy object.  Just a bunch of methods.  It is envisaged that
 * this structure will be embedded in a bigger, policy specific structure
 * (ie. use container_of()).
 */
struct dm_cache_policy {

	/*
	 * FIXME: make it clear which methods are optional, and which may
	 * block.
	 */

	/*
	 * Destroys this object.
	 */
	void (*destroy)(struct dm_cache_policy *p);

	/*
	 * See large comment above.
	 *
	 * oblock      - the origin block we're interested in.
	 *
	 * can_block - indicates whether the current thread is allowed to
	 *             block.  -EWOULDBLOCK returned if it can't and would.
	 *
	 * can_migrate - gives permission for POLICY_NEW or POLICY_REPLACE
	 *               instructions.  If denied and the policy would have
	 *               returned one of these instructions it should
	 *               return -EWOULDBLOCK.
	 *
	 * discarded_oblock - indicates whether the whole origin block is
	 *               in a discarded state (FIXME: better to tell the
	 *               policy about this sooner, so it can recycle that
	 *               cache block if it wants.)
	 * bio         - the bio that triggered this call.
	 * result      - gets filled in with the instruction.
	 *
	 * May only return 0, or -EWOULDBLOCK (if !can_migrate)
	 */
	int (*map)(struct dm_cache_policy *p, dm_oblock_t oblock,
		   bool can_block, bool can_migrate, bool discarded_oblock,
		   struct bio *bio, struct policy_result *result);

	/*
	 * Sometimes we want to see if a block is in the cache, without
	 * triggering any update of stats.  (ie. it's not a real hit).
	 *
	 * Must not block.
	 *
	 * Returns 0 if in cache, -ENOENT if not, < 0 for other errors
	 * (-EWOULDBLOCK would be typical).
	 */
	int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock);

	/*
	 * Keep the policy's view of a cached block's dirty state in sync
	 * with the core target.  (NOTE(review): presumably this is what
	 * feeds writeback_work() below -- confirm against the individual
	 * policy implementations.)
	 */
	void (*set_dirty)(struct dm_cache_policy *p, dm_oblock_t oblock);
	void (*clear_dirty)(struct dm_cache_policy *p, dm_oblock_t oblock);

	/*
	 * Called when a cache target is first created.  Used to load a
	 * mapping from the metadata device into the policy.
	 */
	int (*load_mapping)(struct dm_cache_policy *p, dm_oblock_t oblock,
			    dm_cblock_t cblock, uint32_t hint, bool hint_valid);

	/*
	 * Iterate over every mapping held by the policy, invoking
	 * fn(context, cblock, oblock, hint) for each.  (NOTE(review):
	 * looks like this is used to persist per-block hints back to the
	 * metadata device -- confirm with the core target.)
	 */
	int (*walk_mappings)(struct dm_cache_policy *p, policy_walk_fn fn,
			     void *context);

	/*
	 * Override functions used on the error paths of the core target.
	 * They must succeed.
	 */
	void (*remove_mapping)(struct dm_cache_policy *p, dm_oblock_t oblock);
	void (*force_mapping)(struct dm_cache_policy *p, dm_oblock_t current_oblock,
			      dm_oblock_t new_oblock);

	/*
	 * This is called via the invalidate_cblocks message.  It is
	 * possible the particular cblock has already been removed due to a
	 * write io in passthrough mode.  In which case this should return
	 * -ENODATA.
	 */
	int (*remove_cblock)(struct dm_cache_policy *p, dm_cblock_t cblock);

	/*
	 * Provide a dirty block to be written back by the core target.
	 *
	 * Returns:
	 *
	 * 0 and @cblock,@oblock: block to write back provided
	 *
	 * -ENODATA: no dirty blocks available
	 */
	int (*writeback_work)(struct dm_cache_policy *p, dm_oblock_t *oblock, dm_cblock_t *cblock);

	/*
	 * How full is the cache?
	 */
	dm_cblock_t (*residency)(struct dm_cache_policy *p);

	/*
	 * Because of where we sit in the block layer, we can be asked to
	 * map a lot of little bios that are all in the same block (no
	 * queue merging has occurred).  To stop the policy being fooled by
	 * these the core target sends regular tick() calls to the policy.
	 * The policy should only count an entry as hit once per tick.
	 */
	void (*tick)(struct dm_cache_policy *p);

	/*
	 * Configuration.
	 */
	int (*emit_config_values)(struct dm_cache_policy *p,
				  char *result, unsigned maxlen);
	int (*set_config_value)(struct dm_cache_policy *p,
				const char *key, const char *value);

	/*
	 * Book keeping ptr for the policy register, not for general use.
	 */
	void *private;
};

/*----------------------------------------------------------------*/

/*
 * We maintain a little register of the different policy types.
 */
#define CACHE_POLICY_NAME_SIZE 16
#define CACHE_POLICY_VERSION_SIZE 3

struct dm_cache_policy_type {
	/* For use by the register code only. */
	struct list_head list;

	/*
	 * Policy writers should fill in these fields.  The name field is
	 * what gets passed on the target line to select your policy.
	 */
	char name[CACHE_POLICY_NAME_SIZE];
	unsigned version[CACHE_POLICY_VERSION_SIZE];

	/*
	 * For use by an alias dm_cache_policy_type to point to the
	 * real dm_cache_policy_type.
	 */
	struct dm_cache_policy_type *real;

	/*
	 * Policies may store a hint for each cache block.
	 * Currently the size of this hint must be 0 or 4 bytes but we
	 * expect to relax this in future.
	 */
	size_t hint_size;

	/* Module providing this policy; pins it while instances exist. */
	struct module *owner;

	/* Constructor: allocates and returns a policy instance. */
	struct dm_cache_policy *(*create)(dm_cblock_t cache_size,
					  sector_t origin_size,
					  sector_t block_size);
};

/*
 * Add/remove a policy type to/from the policy register (typically from a
 * policy module's init/exit hooks).
 */
int dm_cache_policy_register(struct dm_cache_policy_type *type);
void dm_cache_policy_unregister(struct dm_cache_policy_type *type);

/*----------------------------------------------------------------*/

#endif	/* DM_CACHE_POLICY_H */