/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#ifndef DM_CACHE_POLICY_H
#define DM_CACHE_POLICY_H

#include "dm-cache-block-types.h"

#include <linux/device-mapper.h>

/*----------------------------------------------------------------*/

/*
 * The cache policy makes the important decisions about which blocks get to
 * live on the faster cache device.
 */
enum policy_operation {
	POLICY_PROMOTE,
	POLICY_DEMOTE,
	POLICY_WRITEBACK
};

/*
 * This is the instruction passed back to the core target.
 */
struct policy_work {
	enum policy_operation op;
	dm_oblock_t oblock;
	dm_cblock_t cblock;
};
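
/*
 * Example: a hedged sketch of how the core target might act on one of
 * these instructions.  queue_copy() and its direction flags are
 * hypothetical helpers, not part of this interface.
 *
 *	static void apply_policy_work(struct policy_work *work)
 *	{
 *		switch (work->op) {
 *		case POLICY_PROMOTE:
 *			// Copy work->oblock from the origin into work->cblock.
 *			queue_copy(work->oblock, work->cblock, TO_CACHE);
 *			break;
 *		case POLICY_DEMOTE:
 *			// Drop the mapping, copying the block back to the
 *			// origin first if it is dirty.
 *			queue_copy(work->oblock, work->cblock, TO_ORIGIN);
 *			break;
 *		case POLICY_WRITEBACK:
 *			// Write the dirty block back; the mapping remains.
 *			queue_copy(work->oblock, work->cblock, TO_ORIGIN);
 *			break;
 *		}
 *	}
 */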

/*
 * The cache policy object.  It is envisaged that this structure will be
 * embedded in a bigger, policy-specific structure (i.e. use container_of()).
 */
struct dm_cache_policy {
	/*
	 * Destroys this object.
	 */
	void (*destroy)(struct dm_cache_policy *p);

	/*
	 * Find the location of a block.
	 *
	 * Must not block.
	 *
	 * Returns 0 if in cache (cblock will be set), -ENOENT if not, or
	 * another negative errno on error (-EWOULDBLOCK would be typical).
	 * data_dir should be READ or WRITE.  fast_copy should be set if
	 * migrating this block would be 'cheap' somehow (e.g. discarded
	 * data).  background_queued will be set if a migration has just
	 * been queued.
	 */
	int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock,
		      int data_dir, bool fast_copy, bool *background_queued);
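
	/*
	 * Example: a hedged sketch of how a caller might interpret
	 * lookup()'s result when mapping a bio.  The remap/defer helpers
	 * are hypothetical, not part of this interface.
	 *
	 *	bool bg_queued = false;
	 *	dm_cblock_t cblock;
	 *	int r = p->lookup(p, oblock, &cblock, WRITE, false, &bg_queued);
	 *
	 *	if (!r)
	 *		remap_to_cache(bio, cblock);	// hit
	 *	else if (r == -ENOENT)
	 *		remap_to_origin(bio);		// miss
	 *	else
	 *		defer_bio(bio);			// e.g. -EWOULDBLOCK
	 */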

	/*
	 * Sometimes the core target can optimise a migration, e.g. the
	 * block may be discarded, or the bio may cover an entire block.
	 * To do so it needs the migration immediately, so that it knows
	 * to do something different with the bio.
	 *
	 * This method is optional (the policy-internal wrapper will fall
	 * back to using lookup).
	 */
	int (*lookup_with_work)(struct dm_cache_policy *p,
				dm_oblock_t oblock, dm_cblock_t *cblock,
				int data_dir, bool fast_copy,
				struct policy_work **work);

	/*
	 * Retrieves background work.  Returns -ENODATA when there's no
	 * background work.
	 */
	int (*get_background_work)(struct dm_cache_policy *p, bool idle,
				   struct policy_work **result);

	/*
	 * You must pass in the same work pointer that you were given, not
	 * a copy.
	 */
	void (*complete_background_work)(struct dm_cache_policy *p,
					 struct policy_work *work,
					 bool success);
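
	/*
	 * Example: a hedged sketch of this protocol from the core
	 * target's side.  do_migration() and the loop shape are
	 * illustrative; the important point is that the exact pointer
	 * handed out by get_background_work() goes back into
	 * complete_background_work().
	 *
	 *	static void process_background_work(struct dm_cache_policy *p,
	 *					    bool idle)
	 *	{
	 *		struct policy_work *work;
	 *
	 *		while (!p->get_background_work(p, idle, &work)) {
	 *			bool success = do_migration(work); // hypothetical
	 *
	 *			p->complete_background_work(p, work, success);
	 *		}
	 *	}
	 */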

	/*
	 * Update the policy's view of a cache block's dirty state.
	 */
	void (*set_dirty)(struct dm_cache_policy *p, dm_cblock_t cblock);
	void (*clear_dirty)(struct dm_cache_policy *p, dm_cblock_t cblock);

	/*
	 * Called when a cache target is first created.  Used to load a
	 * mapping from the metadata device into the policy.
	 */
	int (*load_mapping)(struct dm_cache_policy *p, dm_oblock_t oblock,
			    dm_cblock_t cblock, bool dirty,
			    uint32_t hint, bool hint_valid);

	/*
	 * Drops the mapping, irrespective of whether it's clean or dirty.
	 * Returns -ENODATA if cblock is not mapped.
	 */
	int (*invalidate_mapping)(struct dm_cache_policy *p, dm_cblock_t cblock);

	/*
	 * Gets the hint for a given cblock.  Called in a single-threaded
	 * context, so no locking is required.
	 */
	uint32_t (*get_hint)(struct dm_cache_policy *p, dm_cblock_t cblock);

	/*
	 * How full is the cache?
	 */
	dm_cblock_t (*residency)(struct dm_cache_policy *p);

	/*
	 * Because of where we sit in the block layer, we can be asked to
	 * map a lot of little bios that are all in the same block (no
	 * queue merging has occurred).  To stop the policy being fooled by
	 * these, the core target sends regular tick() calls to the policy.
	 * The policy should only count an entry as a hit once per tick.
	 *
	 * This method is optional.
	 */
	void (*tick)(struct dm_cache_policy *p, bool can_block);

	/*
	 * Configuration.
	 */
	int (*emit_config_values)(struct dm_cache_policy *p, char *result,
				  unsigned int maxlen, ssize_t *sz_ptr);
	int (*set_config_value)(struct dm_cache_policy *p,
				const char *key, const char *value);

	void (*allow_migrations)(struct dm_cache_policy *p, bool allow);

	/*
	 * Bookkeeping pointer for the policy register; not for general use.
	 */
	void *private;
};
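
/*
 * Example: a minimal sketch of embedding struct dm_cache_policy in a
 * bigger, policy-specific structure, as suggested above.  The "example"
 * policy is hypothetical.
 *
 *	struct example_policy {
 *		struct dm_cache_policy policy;
 *		spinlock_t lock;
 *		// ... policy-specific state ...
 *	};
 *
 *	static struct example_policy *to_example(struct dm_cache_policy *p)
 *	{
 *		return container_of(p, struct example_policy, policy);
 *	}
 *
 *	static void example_destroy(struct dm_cache_policy *p)
 *	{
 *		kfree(to_example(p));
 *	}
 */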
/*----------------------------------------------------------------*/

/*
 * We maintain a little register of the different policy types.
 */
#define CACHE_POLICY_NAME_SIZE 16
#define CACHE_POLICY_VERSION_SIZE 3

struct dm_cache_policy_type {
	/* For use by the register code only. */
	struct list_head list;

	/*
	 * Policy writers should fill in these fields.  The name field is
	 * what gets passed on the target line to select your policy.
	 */
	char name[CACHE_POLICY_NAME_SIZE];
	unsigned int version[CACHE_POLICY_VERSION_SIZE];

	/*
	 * For use by an alias dm_cache_policy_type to point to the
	 * real dm_cache_policy_type.
	 */
	struct dm_cache_policy_type *real;

	/*
	 * Policies may store a hint for each cache block.
	 * Currently the size of this hint must be 0 or 4 bytes, but we
	 * expect to relax this in the future.
	 */
	size_t hint_size;

	struct module *owner;
	struct dm_cache_policy *(*create)(dm_cblock_t cache_size,
					  sector_t origin_size,
					  sector_t block_size);
};
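
/*
 * Example: a hedged sketch of how a policy module might fill in its
 * type.  example_create(), the "example" name, and the reuse of the
 * hypothetical example_policy above are all illustrative.
 *
 *	static struct dm_cache_policy *example_create(dm_cblock_t cache_size,
 *						      sector_t origin_size,
 *						      sector_t block_size)
 *	{
 *		struct example_policy *ep = kzalloc(sizeof(*ep), GFP_KERNEL);
 *
 *		if (!ep)
 *			return NULL;
 *
 *		ep->policy.destroy = example_destroy;
 *		// ... fill in the rest of the ops ...
 *		return &ep->policy;
 *	}
 *
 *	static struct dm_cache_policy_type example_policy_type = {
 *		.name = "example",
 *		.version = {1, 0, 0},
 *		.hint_size = 4,
 *		.owner = THIS_MODULE,
 *		.create = example_create
 *	};
 */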

int dm_cache_policy_register(struct dm_cache_policy_type *type);
void dm_cache_policy_unregister(struct dm_cache_policy_type *type);
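
/*
 * Example: registering the hypothetical type above from module
 * init/exit, as a sketch.
 *
 *	static int __init example_init(void)
 *	{
 *		return dm_cache_policy_register(&example_policy_type);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		dm_cache_policy_unregister(&example_policy_type);
 *	}
 *
 *	module_init(example_init);
 *	module_exit(example_exit);
 */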

/*----------------------------------------------------------------*/

#endif	/* DM_CACHE_POLICY_H */