xref: /freebsd/sys/geom/eli/g_eli_key_cache.c (revision 1719886f6d08408b834d270c59ffcfd821c8f63a)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011-2019 Pawel Jakub Dawidek <pawel@dawidek.net>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#ifdef _KERNEL
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#endif /* _KERNEL */
#include <sys/queue.h>
#include <sys/tree.h>

#include <geom/geom.h>

#include <geom/eli/g_eli.h>

#ifdef _KERNEL
MALLOC_DECLARE(M_ELI);

SYSCTL_DECL(_kern_geom_eli);
/*
 * The default limit (8192 keys) allows caching all the keys for a 4TB
 * provider with 512-byte sectors and takes around 1MB of memory.
 */
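/*
 * For reference: a separate key is used for every 2^G_ELI_KEY_SHIFT sectors,
 * so with 512-byte sectors one key covers 4TB / 8192 = 512MB of data, and the
 * ~1MB estimate assumes roughly 128 bytes (1MB / 8192) per cached
 * struct g_eli_key.
 */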
static u_int g_eli_key_cache_limit = 8192;
SYSCTL_UINT(_kern_geom_eli, OID_AUTO, key_cache_limit, CTLFLAG_RDTUN,
    &g_eli_key_cache_limit, 0, "Maximum number of encryption keys to cache");
static uint64_t g_eli_key_cache_hits;
SYSCTL_UQUAD(_kern_geom_eli, OID_AUTO, key_cache_hits, CTLFLAG_RW,
    &g_eli_key_cache_hits, 0, "Key cache hits");
static uint64_t g_eli_key_cache_misses;
SYSCTL_UQUAD(_kern_geom_eli, OID_AUTO, key_cache_misses, CTLFLAG_RW,
    &g_eli_key_cache_misses, 0, "Key cache misses");

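/*
 * Order cached keys by key number; this is the comparison function for the
 * RB tree of cached keys.
 */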
static int
g_eli_key_cmp(const struct g_eli_key *a, const struct g_eli_key *b)
{

	if (a->gek_keyno > b->gek_keyno)
		return (1);
	else if (a->gek_keyno < b->gek_keyno)
		return (-1);
	return (0);
}
#endif /* _KERNEL */

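/*
 * Derive encryption key number 'keyno' into 'key': an HMAC
 * (g_eli_crypto_hmac()) over the string "ekey" plus the little-endian key
 * number, keyed with sc_mkey when G_ELI_FLAG_ENC_IVKEY is set and with
 * sc_ekey otherwise.
 */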
void
g_eli_key_fill(struct g_eli_softc *sc, struct g_eli_key *key, uint64_t keyno)
{
	const uint8_t *ekey;
	struct {
		char magic[4];
		uint8_t keyno[8];
	} __packed hmacdata;

	if ((sc->sc_flags & G_ELI_FLAG_ENC_IVKEY) != 0)
		ekey = sc->sc_mkey;
	else
		ekey = sc->sc_ekey;

	bcopy("ekey", hmacdata.magic, 4);
	le64enc(hmacdata.keyno, keyno);
	g_eli_crypto_hmac(ekey, G_ELI_MAXKEYLEN, (uint8_t *)&hmacdata,
	    sizeof(hmacdata), key->gek_key, 0);
	key->gek_keyno = keyno;
	key->gek_count = 0;
	key->gek_magic = G_ELI_KEY_MAGIC;
}

#ifdef _KERNEL
RB_PROTOTYPE(g_eli_key_tree, g_eli_key, gek_link, g_eli_key_cmp);
RB_GENERATE(g_eli_key_tree, g_eli_key, gek_link, g_eli_key_cmp);

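/*
 * Allocate and derive a new key.  The lock is dropped for the allocation,
 * so the returned key may be one that another thread inserted in the
 * meantime; either way the key ends up at the tail of the LRU queue.
 */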
static struct g_eli_key *
g_eli_key_allocate(struct g_eli_softc *sc, uint64_t keyno)
{
	struct g_eli_key *key, *ekey, keysearch;

	mtx_assert(&sc->sc_ekeys_lock, MA_OWNED);
	mtx_unlock(&sc->sc_ekeys_lock);

	key = malloc(sizeof(*key), M_ELI, M_WAITOK);
	g_eli_key_fill(sc, key, keyno);

	mtx_lock(&sc->sc_ekeys_lock);
	/*
	 * Recheck whether the key was added while the lock was dropped.
	 */
	keysearch.gek_keyno = keyno;
	ekey = RB_FIND(g_eli_key_tree, &sc->sc_ekeys_tree, &keysearch);
	if (ekey != NULL) {
		zfree(key, M_ELI);
		key = ekey;
		TAILQ_REMOVE(&sc->sc_ekeys_queue, key, gek_next);
	} else {
		RB_INSERT(g_eli_key_tree, &sc->sc_ekeys_tree, key);
		sc->sc_ekeys_allocated++;
	}
	TAILQ_INSERT_TAIL(&sc->sc_ekeys_queue, key, gek_next);

	return (key);
}

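/*
 * Return the least recently used key that is not referenced, or NULL if
 * all cached keys are currently referenced.
 */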
static struct g_eli_key *
g_eli_key_find_last(struct g_eli_softc *sc)
{
	struct g_eli_key *key;

	mtx_assert(&sc->sc_ekeys_lock, MA_OWNED);

	TAILQ_FOREACH(key, &sc->sc_ekeys_queue, gek_next) {
		if (key->gek_count == 0)
			break;
	}

	return (key);
}

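/*
 * Reuse an unreferenced cached key for key number 'keyno' and move it to
 * the tail of the LRU queue.
 */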
static void
g_eli_key_replace(struct g_eli_softc *sc, struct g_eli_key *key, uint64_t keyno)
{

	mtx_assert(&sc->sc_ekeys_lock, MA_OWNED);
	KASSERT(key->gek_magic == G_ELI_KEY_MAGIC, ("Invalid magic."));

	RB_REMOVE(g_eli_key_tree, &sc->sc_ekeys_tree, key);
	TAILQ_REMOVE(&sc->sc_ekeys_queue, key, gek_next);

	KASSERT(key->gek_count == 0, ("gek_count=%d", key->gek_count));

	g_eli_key_fill(sc, key, keyno);

	RB_INSERT(g_eli_key_tree, &sc->sc_ekeys_tree, key);
	TAILQ_INSERT_TAIL(&sc->sc_ekeys_queue, key, gek_next);
}

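/*
 * Remove an unreferenced key from the cache and free it.
 */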
static void
g_eli_key_remove(struct g_eli_softc *sc, struct g_eli_key *key)
{

	mtx_assert(&sc->sc_ekeys_lock, MA_OWNED);
	KASSERT(key->gek_magic == G_ELI_KEY_MAGIC, ("Invalid magic."));
	KASSERT(key->gek_count == 0, ("gek_count=%d", key->gek_count));

	RB_REMOVE(g_eli_key_tree, &sc->sc_ekeys_tree, key);
	TAILQ_REMOVE(&sc->sc_ekeys_queue, key, gek_next);
	sc->sc_ekeys_allocated--;
	zfree(key, M_ELI);
}

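/*
 * Derive sc_ekey from the Data-Key part of the Master Key and, unless
 * G_ELI_FLAG_SINGLE_KEY is set, set up the per-offset key cache: compute how
 * many keys the provider needs and pre-allocate all of them when the total
 * fits under the cache limit.
 */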
void
g_eli_key_init(struct g_eli_softc *sc)
{
	uint8_t *mkey;

	mtx_lock(&sc->sc_ekeys_lock);

	mkey = sc->sc_mkey + sizeof(sc->sc_ivkey);
	if ((sc->sc_flags & G_ELI_FLAG_AUTH) == 0)
		bcopy(mkey, sc->sc_ekey, G_ELI_DATAKEYLEN);
	else {
		/*
		 * The encryption key is: ekey = HMAC_SHA512(Data-Key, 0x10)
		 */
		g_eli_crypto_hmac(mkey, G_ELI_MAXKEYLEN, "\x10", 1,
		    sc->sc_ekey, 0);
	}

	if ((sc->sc_flags & G_ELI_FLAG_SINGLE_KEY) != 0) {
		sc->sc_ekeys_total = 1;
		sc->sc_ekeys_allocated = 0;
	} else {
		off_t mediasize;
		size_t blocksize;

		if ((sc->sc_flags & G_ELI_FLAG_AUTH) != 0) {
			struct g_provider *pp;

			pp = LIST_FIRST(&sc->sc_geom->consumer)->provider;
			mediasize = pp->mediasize;
			blocksize = pp->sectorsize;
		} else {
			mediasize = sc->sc_mediasize;
			blocksize = sc->sc_sectorsize;
		}
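		/*
		 * One key per 2^G_ELI_KEY_SHIFT sectors; this mirrors the
		 * keyno computation in g_eli_key_hold().
		 */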
		sc->sc_ekeys_total =
		    ((mediasize - 1) >> G_ELI_KEY_SHIFT) / blocksize + 1;
		sc->sc_ekeys_allocated = 0;
		TAILQ_INIT(&sc->sc_ekeys_queue);
		RB_INIT(&sc->sc_ekeys_tree);
		if (sc->sc_ekeys_total <= g_eli_key_cache_limit) {
			uint64_t keyno;

			for (keyno = 0; keyno < sc->sc_ekeys_total; keyno++)
				(void)g_eli_key_allocate(sc, keyno);
			KASSERT(sc->sc_ekeys_total == sc->sc_ekeys_allocated,
			    ("sc_ekeys_total=%ju != sc_ekeys_allocated=%ju",
			    (uintmax_t)sc->sc_ekeys_total,
			    (uintmax_t)sc->sc_ekeys_allocated));
		}
	}

	mtx_unlock(&sc->sc_ekeys_lock);
}

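/*
 * Destroy the key material: wipe the single key, or remove and zero every
 * cached key.
 */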
void
g_eli_key_destroy(struct g_eli_softc *sc)
{

	mtx_lock(&sc->sc_ekeys_lock);
	if ((sc->sc_flags & G_ELI_FLAG_SINGLE_KEY) != 0) {
		explicit_bzero(sc->sc_ekey, sizeof(sc->sc_ekey));
	} else {
		struct g_eli_key *key;

		while ((key = TAILQ_FIRST(&sc->sc_ekeys_queue)) != NULL)
			g_eli_key_remove(sc, key);
		TAILQ_INIT(&sc->sc_ekeys_queue);
		RB_INIT(&sc->sc_ekeys_tree);
	}
	mtx_unlock(&sc->sc_ekeys_lock);
}

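/*
 * Recalculate the number of keys after the provider has grown and, if the
 * new total still fits under the cache limit, pre-allocate the new keys.
 */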
void
g_eli_key_resize(struct g_eli_softc *sc)
{
	uint64_t new_ekeys_total;
	off_t mediasize;
	size_t blocksize;

	if ((sc->sc_flags & G_ELI_FLAG_SINGLE_KEY) != 0) {
		return;
	}

	mtx_lock(&sc->sc_ekeys_lock);

	if ((sc->sc_flags & G_ELI_FLAG_AUTH) != 0) {
		struct g_provider *pp;

		pp = LIST_FIRST(&sc->sc_geom->consumer)->provider;
		mediasize = pp->mediasize;
		blocksize = pp->sectorsize;
	} else {
		mediasize = sc->sc_mediasize;
		blocksize = sc->sc_sectorsize;
	}
	new_ekeys_total = ((mediasize - 1) >> G_ELI_KEY_SHIFT) / blocksize + 1;
	/* We only allow the provider to grow. */
	KASSERT(new_ekeys_total >= sc->sc_ekeys_total,
	    ("new_ekeys_total=%ju < sc_ekeys_total=%ju",
	    (uintmax_t)new_ekeys_total, (uintmax_t)sc->sc_ekeys_total));
	if (new_ekeys_total <= g_eli_key_cache_limit) {
		uint64_t keyno;

		for (keyno = sc->sc_ekeys_total; keyno < new_ekeys_total;
		    keyno++) {
			(void)g_eli_key_allocate(sc, keyno);
		}
		KASSERT(new_ekeys_total == sc->sc_ekeys_allocated,
		    ("new_ekeys_total=%ju != sc_ekeys_allocated=%ju",
		    (uintmax_t)new_ekeys_total,
		    (uintmax_t)sc->sc_ekeys_allocated));
	}

	sc->sc_ekeys_total = new_ekeys_total;

	mtx_unlock(&sc->sc_ekeys_lock);
}

/*
 * Select an encryption key.  If G_ELI_FLAG_SINGLE_KEY is set, a single key is
 * used for all the data.  Otherwise the key is selected based on the data
 * offset.
 */
uint8_t *
g_eli_key_hold(struct g_eli_softc *sc, off_t offset, size_t blocksize)
{
	struct g_eli_key *key, keysearch;
	uint64_t keyno;

	if ((sc->sc_flags & G_ELI_FLAG_SINGLE_KEY) != 0)
		return (sc->sc_ekey);

	/* We switch keys every 2^G_ELI_KEY_SHIFT blocks. */
	keyno = (offset >> G_ELI_KEY_SHIFT) / blocksize;

	KASSERT(keyno < sc->sc_ekeys_total,
	    ("%s: keyno=%ju >= sc_ekeys_total=%ju",
	    __func__, (uintmax_t)keyno, (uintmax_t)sc->sc_ekeys_total));

	keysearch.gek_keyno = keyno;

	if (sc->sc_ekeys_total == sc->sc_ekeys_allocated) {
		/* We have all the keys, so avoid some overhead. */
		key = RB_FIND(g_eli_key_tree, &sc->sc_ekeys_tree, &keysearch);
		KASSERT(key != NULL, ("No key %ju found.", (uintmax_t)keyno));
		KASSERT(key->gek_magic == G_ELI_KEY_MAGIC,
		    ("Invalid key magic."));
		return (key->gek_key);
	}

	mtx_lock(&sc->sc_ekeys_lock);
	key = RB_FIND(g_eli_key_tree, &sc->sc_ekeys_tree, &keysearch);
	if (key != NULL) {
		g_eli_key_cache_hits++;
		TAILQ_REMOVE(&sc->sc_ekeys_queue, key, gek_next);
		TAILQ_INSERT_TAIL(&sc->sc_ekeys_queue, key, gek_next);
	} else {
		/*
		 * No key in the cache.  Find the least recently used
		 * unreferenced key or allocate a new one if we haven't
		 * reached our limit yet.
		 */
		if (sc->sc_ekeys_allocated < g_eli_key_cache_limit) {
			key = g_eli_key_allocate(sc, keyno);
		} else {
			g_eli_key_cache_misses++;
			key = g_eli_key_find_last(sc);
			if (key != NULL) {
				g_eli_key_replace(sc, key, keyno);
			} else {
				/* All keys are referenced? Allocate one. */
				key = g_eli_key_allocate(sc, keyno);
			}
		}
	}
	key->gek_count++;
	mtx_unlock(&sc->sc_ekeys_lock);

	KASSERT(key->gek_magic == G_ELI_KEY_MAGIC, ("Invalid key magic."));

	return (key->gek_key);
}

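/*
 * Drop a reference on a key returned by g_eli_key_hold().  The cast below
 * relies on gek_key being the first member of struct g_eli_key.  Dropping a
 * reference may also let the cache shrink back under g_eli_key_cache_limit.
 */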
void
g_eli_key_drop(struct g_eli_softc *sc, uint8_t *rawkey)
{
	struct g_eli_key *key = (struct g_eli_key *)rawkey;

	if ((sc->sc_flags & G_ELI_FLAG_SINGLE_KEY) != 0)
		return;

	KASSERT(key->gek_magic == G_ELI_KEY_MAGIC, ("Invalid key magic."));

	if (sc->sc_ekeys_total == sc->sc_ekeys_allocated)
		return;

	mtx_lock(&sc->sc_ekeys_lock);
	KASSERT(key->gek_count > 0, ("key->gek_count=%d", key->gek_count));
	key->gek_count--;
	while (sc->sc_ekeys_allocated > g_eli_key_cache_limit) {
		key = g_eli_key_find_last(sc);
		if (key == NULL)
			break;
		g_eli_key_remove(sc, key);
	}
	mtx_unlock(&sc->sc_ekeys_lock);
}
#endif /* _KERNEL */