xref: /freebsd/sys/geom/eli/g_eli_key_cache.c (revision 031beb4e239bfce798af17f5fe8dba8bcaf13d99)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011-2019 Pawel Jakub Dawidek <pawel@dawidek.net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#ifdef _KERNEL
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#endif /* _KERNEL */
#include <sys/queue.h>
#include <sys/tree.h>

#include <geom/geom.h>

#include <geom/eli/g_eli.h>

#ifdef _KERNEL
MALLOC_DECLARE(M_ELI);

SYSCTL_DECL(_kern_geom_eli);
/*
 * The default limit (8192 keys) allows caching all of the keys for a 4TB
 * provider with 512-byte sectors and takes around 1MB of memory.
 */
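/*
 * Worked example, assuming the stock G_ELI_KEY_SHIFT of 20 (one key per
 * 2^20 blocks): a 4TB provider with 512-byte sectors needs
 * 4TB / (2^20 * 512B) = 8192 keys, and at roughly 128 bytes of memory per
 * cached struct g_eli_key that is about 1MB.
 */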
static u_int g_eli_key_cache_limit = 8192;
SYSCTL_UINT(_kern_geom_eli, OID_AUTO, key_cache_limit, CTLFLAG_RDTUN,
    &g_eli_key_cache_limit, 0, "Maximum number of encryption keys to cache");
static uint64_t g_eli_key_cache_hits;
SYSCTL_UQUAD(_kern_geom_eli, OID_AUTO, key_cache_hits, CTLFLAG_RW,
    &g_eli_key_cache_hits, 0, "Key cache hits");
static uint64_t g_eli_key_cache_misses;
SYSCTL_UQUAD(_kern_geom_eli, OID_AUTO, key_cache_misses, CTLFLAG_RW,
    &g_eli_key_cache_misses, 0, "Key cache misses");

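/*
 * Comparator ordering cached keys by key number in the red-black tree.
 */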
static int
g_eli_key_cmp(const struct g_eli_key *a, const struct g_eli_key *b)
{

	if (a->gek_keyno > b->gek_keyno)
		return (1);
	else if (a->gek_keyno < b->gek_keyno)
		return (-1);
	return (0);
}
#endif /* _KERNEL */

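/*
 * Derive the Data Key for key number 'keyno':
 *
 *	gek_key = HMAC_SHA512(key, "ekey" || LE64(keyno))
 *
 * where 'key' is the Master Key when G_ELI_FLAG_ENC_IVKEY is set and the
 * precomputed encryption key (sc_ekey) otherwise.
 */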
void
g_eli_key_fill(struct g_eli_softc *sc, struct g_eli_key *key, uint64_t keyno)
{
	const uint8_t *ekey;
	struct {
		char magic[4];
		uint8_t keyno[8];
	} __packed hmacdata;

	if ((sc->sc_flags & G_ELI_FLAG_ENC_IVKEY) != 0)
		ekey = sc->sc_mkey;
	else
		ekey = sc->sc_ekey;

	bcopy("ekey", hmacdata.magic, 4);
	le64enc(hmacdata.keyno, keyno);
	g_eli_crypto_hmac(ekey, G_ELI_MAXKEYLEN, (uint8_t *)&hmacdata,
	    sizeof(hmacdata), key->gek_key, 0);
	key->gek_keyno = keyno;
	key->gek_count = 0;
	key->gek_magic = G_ELI_KEY_MAGIC;
}

#ifdef _KERNEL
RB_PROTOTYPE(g_eli_key_tree, g_eli_key, gek_link, g_eli_key_cmp);
RB_GENERATE(g_eli_key_tree, g_eli_key, gek_link, g_eli_key_cmp);

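/*
 * Allocate a new cached key for 'keyno'.  Called with sc_ekeys_lock held;
 * the lock is dropped for the duration of malloc() and the HMAC computation
 * and re-acquired afterwards, so the tree is re-checked in case another
 * thread allocated the same key in the meantime.  The key is placed at the
 * tail (most recently used end) of the queue and returned with the lock held.
 */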
static struct g_eli_key *
g_eli_key_allocate(struct g_eli_softc *sc, uint64_t keyno)
{
	struct g_eli_key *key, *ekey, keysearch;

	mtx_assert(&sc->sc_ekeys_lock, MA_OWNED);
	mtx_unlock(&sc->sc_ekeys_lock);

	key = malloc(sizeof(*key), M_ELI, M_WAITOK);
	g_eli_key_fill(sc, key, keyno);

	mtx_lock(&sc->sc_ekeys_lock);
	/*
	 * Recheck whether the key was added while the lock was dropped.
	 */
	keysearch.gek_keyno = keyno;
	ekey = RB_FIND(g_eli_key_tree, &sc->sc_ekeys_tree, &keysearch);
	if (ekey != NULL) {
		zfree(key, M_ELI);
		key = ekey;
		TAILQ_REMOVE(&sc->sc_ekeys_queue, key, gek_next);
	} else {
		RB_INSERT(g_eli_key_tree, &sc->sc_ekeys_tree, key);
		sc->sc_ekeys_allocated++;
	}
	TAILQ_INSERT_TAIL(&sc->sc_ekeys_queue, key, gek_next);

	return (key);
}

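/*
 * Find the least recently used key that is not currently referenced, or
 * return NULL if all cached keys are in use.  The queue is kept in LRU
 * order, with the least recently used key at its head.
 */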
static struct g_eli_key *
g_eli_key_find_last(struct g_eli_softc *sc)
{
	struct g_eli_key *key;

	mtx_assert(&sc->sc_ekeys_lock, MA_OWNED);

	TAILQ_FOREACH(key, &sc->sc_ekeys_queue, gek_next) {
		if (key->gek_count == 0)
			break;
	}

	return (key);
}

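/*
 * Recycle an unreferenced cached key: remove it from the tree and the queue,
 * re-derive its material for 'keyno' and re-insert it as most recently used.
 */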
static void
g_eli_key_replace(struct g_eli_softc *sc, struct g_eli_key *key, uint64_t keyno)
{

	mtx_assert(&sc->sc_ekeys_lock, MA_OWNED);
	KASSERT(key->gek_magic == G_ELI_KEY_MAGIC, ("Invalid magic."));

	RB_REMOVE(g_eli_key_tree, &sc->sc_ekeys_tree, key);
	TAILQ_REMOVE(&sc->sc_ekeys_queue, key, gek_next);

	KASSERT(key->gek_count == 0, ("gek_count=%d", key->gek_count));

	g_eli_key_fill(sc, key, keyno);

	RB_INSERT(g_eli_key_tree, &sc->sc_ekeys_tree, key);
	TAILQ_INSERT_TAIL(&sc->sc_ekeys_queue, key, gek_next);
}

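/*
 * Evict an unreferenced key from the cache, zeroing and freeing its memory.
 */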
static void
g_eli_key_remove(struct g_eli_softc *sc, struct g_eli_key *key)
{

	mtx_assert(&sc->sc_ekeys_lock, MA_OWNED);
	KASSERT(key->gek_magic == G_ELI_KEY_MAGIC, ("Invalid magic."));
	KASSERT(key->gek_count == 0, ("gek_count=%d", key->gek_count));

	RB_REMOVE(g_eli_key_tree, &sc->sc_ekeys_tree, key);
	TAILQ_REMOVE(&sc->sc_ekeys_queue, key, gek_next);
	sc->sc_ekeys_allocated--;
	zfree(key, M_ELI);
}

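/*
 * Derive the encryption key (sc_ekey) from the Master Key and, unless a
 * single key is used for the whole provider, compute how many Data Keys the
 * provider needs and pre-allocate all of them when they fit within
 * kern.geom.eli.key_cache_limit.
 */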
void
g_eli_key_init(struct g_eli_softc *sc)
{
	uint8_t *mkey;

	mtx_lock(&sc->sc_ekeys_lock);

	mkey = sc->sc_mkey + sizeof(sc->sc_ivkey);
	if ((sc->sc_flags & G_ELI_FLAG_AUTH) == 0)
		bcopy(mkey, sc->sc_ekey, G_ELI_DATAKEYLEN);
	else {
		/*
		 * The encryption key is: ekey = HMAC_SHA512(Data-Key, 0x10)
		 */
		g_eli_crypto_hmac(mkey, G_ELI_MAXKEYLEN, "\x10", 1,
		    sc->sc_ekey, 0);
	}

	if ((sc->sc_flags & G_ELI_FLAG_SINGLE_KEY) != 0) {
		sc->sc_ekeys_total = 1;
		sc->sc_ekeys_allocated = 0;
	} else {
		off_t mediasize;
		size_t blocksize;

		if ((sc->sc_flags & G_ELI_FLAG_AUTH) != 0) {
			struct g_provider *pp;

			pp = LIST_FIRST(&sc->sc_geom->consumer)->provider;
			mediasize = pp->mediasize;
			blocksize = pp->sectorsize;
		} else {
			mediasize = sc->sc_mediasize;
			blocksize = sc->sc_sectorsize;
		}
		sc->sc_ekeys_total =
		    ((mediasize - 1) >> G_ELI_KEY_SHIFT) / blocksize + 1;
		sc->sc_ekeys_allocated = 0;
		TAILQ_INIT(&sc->sc_ekeys_queue);
		RB_INIT(&sc->sc_ekeys_tree);
		if (sc->sc_ekeys_total <= g_eli_key_cache_limit) {
			uint64_t keyno;

			for (keyno = 0; keyno < sc->sc_ekeys_total; keyno++)
				(void)g_eli_key_allocate(sc, keyno);
			KASSERT(sc->sc_ekeys_total == sc->sc_ekeys_allocated,
			    ("sc_ekeys_total=%ju != sc_ekeys_allocated=%ju",
			    (uintmax_t)sc->sc_ekeys_total,
			    (uintmax_t)sc->sc_ekeys_allocated));
		}
	}

	mtx_unlock(&sc->sc_ekeys_lock);
}

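/*
 * Wipe all key material: zero the single encryption key, or evict (and zero)
 * every cached Data Key.
 */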
void
g_eli_key_destroy(struct g_eli_softc *sc)
{

	mtx_lock(&sc->sc_ekeys_lock);
	if ((sc->sc_flags & G_ELI_FLAG_SINGLE_KEY) != 0) {
		explicit_bzero(sc->sc_ekey, sizeof(sc->sc_ekey));
	} else {
		struct g_eli_key *key;

		while ((key = TAILQ_FIRST(&sc->sc_ekeys_queue)) != NULL)
			g_eli_key_remove(sc, key);
		TAILQ_INIT(&sc->sc_ekeys_queue);
		RB_INIT(&sc->sc_ekeys_tree);
	}
	mtx_unlock(&sc->sc_ekeys_lock);
}

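/*
 * Recalculate the number of Data Keys after the provider has grown and, if
 * the new total still fits within the cache limit, pre-allocate the keys for
 * the newly added space.  Shrinking is not supported.
 */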
void
g_eli_key_resize(struct g_eli_softc *sc)
{
	uint64_t new_ekeys_total;
	off_t mediasize;
	size_t blocksize;

	if ((sc->sc_flags & G_ELI_FLAG_SINGLE_KEY) != 0) {
		return;
	}

	mtx_lock(&sc->sc_ekeys_lock);

	if ((sc->sc_flags & G_ELI_FLAG_AUTH) != 0) {
		struct g_provider *pp;

		pp = LIST_FIRST(&sc->sc_geom->consumer)->provider;
		mediasize = pp->mediasize;
		blocksize = pp->sectorsize;
	} else {
		mediasize = sc->sc_mediasize;
		blocksize = sc->sc_sectorsize;
	}
	new_ekeys_total = ((mediasize - 1) >> G_ELI_KEY_SHIFT) / blocksize + 1;
	/* We only allow the provider to grow. */
	KASSERT(new_ekeys_total >= sc->sc_ekeys_total,
	    ("new_ekeys_total=%ju < sc_ekeys_total=%ju",
	    (uintmax_t)new_ekeys_total, (uintmax_t)sc->sc_ekeys_total));
	if (new_ekeys_total <= g_eli_key_cache_limit) {
		uint64_t keyno;

		for (keyno = sc->sc_ekeys_total; keyno < new_ekeys_total;
		    keyno++) {
			(void)g_eli_key_allocate(sc, keyno);
		}
		KASSERT(new_ekeys_total == sc->sc_ekeys_allocated,
		    ("new_ekeys_total=%ju != sc_ekeys_allocated=%ju",
		    (uintmax_t)new_ekeys_total,
		    (uintmax_t)sc->sc_ekeys_allocated));
	}

	sc->sc_ekeys_total = new_ekeys_total;

	mtx_unlock(&sc->sc_ekeys_lock);
}

/*
 * Select the encryption key.  If G_ELI_FLAG_SINGLE_KEY is set, a single key
 * is used for all of the data; otherwise the key is selected based on the
 * data offset.
 */
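/*
 * The returned pointer refers to a cached key and remains valid until the
 * caller releases it with g_eli_key_drop(); reference counting is skipped
 * when every key is cached, since nothing is ever evicted in that case.
 */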
uint8_t *
g_eli_key_hold(struct g_eli_softc *sc, off_t offset, size_t blocksize)
{
	struct g_eli_key *key, keysearch;
	uint64_t keyno;

	if ((sc->sc_flags & G_ELI_FLAG_SINGLE_KEY) != 0)
		return (sc->sc_ekey);

	/* We switch the key every 2^G_ELI_KEY_SHIFT blocks. */
	keyno = (offset >> G_ELI_KEY_SHIFT) / blocksize;

	KASSERT(keyno < sc->sc_ekeys_total,
	    ("%s: keyno=%ju >= sc_ekeys_total=%ju",
	    __func__, (uintmax_t)keyno, (uintmax_t)sc->sc_ekeys_total));

	keysearch.gek_keyno = keyno;

	if (sc->sc_ekeys_total == sc->sc_ekeys_allocated) {
		/* We have all the keys, so avoid some overhead. */
		key = RB_FIND(g_eli_key_tree, &sc->sc_ekeys_tree, &keysearch);
		KASSERT(key != NULL, ("No key %ju found.", (uintmax_t)keyno));
		KASSERT(key->gek_magic == G_ELI_KEY_MAGIC,
		    ("Invalid key magic."));
		return (key->gek_key);
	}

	mtx_lock(&sc->sc_ekeys_lock);
	key = RB_FIND(g_eli_key_tree, &sc->sc_ekeys_tree, &keysearch);
	if (key != NULL) {
		g_eli_key_cache_hits++;
		TAILQ_REMOVE(&sc->sc_ekeys_queue, key, gek_next);
		TAILQ_INSERT_TAIL(&sc->sc_ekeys_queue, key, gek_next);
	} else {
		/*
		 * The key is not cached: allocate a new one if we have not
		 * reached the limit yet, otherwise recycle the least recently
		 * used unreferenced key.
		 */
		if (sc->sc_ekeys_allocated < g_eli_key_cache_limit) {
			key = g_eli_key_allocate(sc, keyno);
		} else {
			g_eli_key_cache_misses++;
			key = g_eli_key_find_last(sc);
			if (key != NULL) {
				g_eli_key_replace(sc, key, keyno);
			} else {
				/* All keys are referenced? Allocate one. */
				key = g_eli_key_allocate(sc, keyno);
			}
		}
	}
	key->gek_count++;
	mtx_unlock(&sc->sc_ekeys_lock);

	KASSERT(key->gek_magic == G_ELI_KEY_MAGIC, ("Invalid key magic."));

	return (key->gek_key);
}

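/*
 * Release a key obtained from g_eli_key_hold().  The raw key pointer is
 * converted back to its struct g_eli_key (gek_key is the first member of the
 * structure), the reference count is dropped and, if the cache has grown
 * beyond its limit, unreferenced keys are evicted starting from the least
 * recently used one.
 */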
void
g_eli_key_drop(struct g_eli_softc *sc, uint8_t *rawkey)
{
	struct g_eli_key *key = (struct g_eli_key *)rawkey;

	if ((sc->sc_flags & G_ELI_FLAG_SINGLE_KEY) != 0)
		return;

	KASSERT(key->gek_magic == G_ELI_KEY_MAGIC, ("Invalid key magic."));

	if (sc->sc_ekeys_total == sc->sc_ekeys_allocated)
		return;

	mtx_lock(&sc->sc_ekeys_lock);
	KASSERT(key->gek_count > 0, ("key->gek_count=%d", key->gek_count));
	key->gek_count--;
	while (sc->sc_ekeys_allocated > g_eli_key_cache_limit) {
		key = g_eli_key_find_last(sc);
		if (key == NULL)
			break;
		g_eli_key_remove(sc, key);
	}
	mtx_unlock(&sc->sc_ekeys_lock);
}
#endif /* _KERNEL */