xref: /freebsd/sys/crypto/aesni/aesni.c (revision 40a8ac8f62b535d30349faf28cf47106b7041b83)
1 /*-
2  * Copyright (c) 2005-2008 Pawel Jakub Dawidek <pjd@FreeBSD.org>
3  * Copyright (c) 2010 Konstantin Belousov <kib@FreeBSD.org>
4  * All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include <sys/param.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
34 #include <sys/kobj.h>
35 #include <sys/libkern.h>
36 #include <sys/lock.h>
37 #include <sys/module.h>
38 #include <sys/malloc.h>
39 #include <sys/rwlock.h>
40 #include <sys/bus.h>
41 #include <sys/uio.h>
42 #include <crypto/aesni/aesni.h>
43 #include <cryptodev_if.h>
44 
struct aesni_softc {
	int32_t cid;		/* crypto framework driver id */
	uint32_t sid;		/* next session id to hand out */
	/* All sessions; free ones at the head, used ones at the tail. */
	TAILQ_HEAD(aesni_sessions_head, aesni_session) sessions;
	struct rwlock lock;	/* protects sessions list and sid */
};
51 
/* Forward declarations for the cryptodev interface and local helpers. */
static int aesni_newsession(device_t, uint32_t *sidp, struct cryptoini *cri);
static int aesni_freesession(device_t, uint64_t tid);
static void aesni_freesession_locked(struct aesni_softc *sc,
    struct aesni_session *ses);
static int aesni_cipher_setup(struct aesni_session *ses,
    struct cryptoini *encini);
static int aesni_cipher_process(struct aesni_session *ses,
    struct cryptodesc *enccrd, struct cryptop *crp);

/* Malloc type for session state and temporary contiguous buffers. */
MALLOC_DEFINE(M_AESNI, "aesni_data", "AESNI Data");
62 
63 static void
64 aesni_identify(driver_t *drv, device_t parent)
65 {
66 
67 	/* NB: order 10 is so we get attached after h/w devices */
68 	if (device_find_child(parent, "aesni", -1) == NULL &&
69 	    BUS_ADD_CHILD(parent, 10, "aesni", -1) == 0)
70 		panic("aesni: could not attach");
71 }
72 
73 static int
74 aesni_probe(device_t dev)
75 {
76 
77 	if ((cpu_feature2 & CPUID2_AESNI) == 0) {
78 		device_printf(dev, "No AESNI support.\n");
79 		return (EINVAL);
80 	}
81 
82 	if ((cpu_feature & CPUID_SSE2) == 0) {
83 		device_printf(dev, "No SSE2 support but AESNI!?!\n");
84 		return (EINVAL);
85 	}
86 
87 	device_set_desc_copy(dev, "AES-CBC,AES-XTS");
88 	return (0);
89 }
90 
91 static int
92 aesni_attach(device_t dev)
93 {
94 	struct aesni_softc *sc;
95 
96 	sc = device_get_softc(dev);
97 	TAILQ_INIT(&sc->sessions);
98 	sc->sid = 1;
99 	sc->cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE |
100 	    CRYPTOCAP_F_SYNC);
101 	if (sc->cid < 0) {
102 		device_printf(dev, "Could not get crypto driver id.\n");
103 		return (ENOMEM);
104 	}
105 
106 	rw_init(&sc->lock, "aesni_lock");
107 	crypto_register(sc->cid, CRYPTO_AES_CBC, 0, 0);
108 	crypto_register(sc->cid, CRYPTO_AES_XTS, 0, 0);
109 	return (0);
110 }
111 
/*
 * Detach: refuse while any session is still in use; otherwise free all
 * cached sessions and unregister from the crypto framework.
 */
static int
aesni_detach(device_t dev)
{
	struct aesni_softc *sc;
	struct aesni_session *ses;

	sc = device_get_softc(dev);
	rw_wlock(&sc->lock);
	/* Any active session makes detach impossible. */
	TAILQ_FOREACH(ses, &sc->sessions, next) {
		if (ses->used) {
			rw_wunlock(&sc->lock);
			device_printf(dev,
			    "Cannot detach, sessions still active.\n");
			return (EBUSY);
		}
	}
	/* All remaining sessions are free; release their FPU contexts. */
	while ((ses = TAILQ_FIRST(&sc->sessions)) != NULL) {
		TAILQ_REMOVE(&sc->sessions, ses, next);
		fpu_kern_free_ctx(ses->fpu_ctx);
		free(ses, M_AESNI);
	}
	rw_wunlock(&sc->lock);
	rw_destroy(&sc->lock);
	/*
	 * NOTE(review): the lock is destroyed before crypto_unregister_all();
	 * presumably the framework quiesces newsession calls first — verify.
	 */
	crypto_unregister_all(sc->cid);
	return (0);
}
138 
/*
 * cryptodev newsession entry point: allocate or recycle a session for a
 * single AES-CBC or AES-XTS transform and expand its key schedules.
 *
 * Exactly one supported encryption initializer must be present in the
 * cri chain; anything else yields EINVAL.  On success the new session
 * id is stored through sidp.
 */
static int
aesni_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
{
	struct aesni_softc *sc;
	struct aesni_session *ses;
	struct cryptoini *encini;
	int error;

	if (sidp == NULL || cri == NULL)
		return (EINVAL);

	sc = device_get_softc(dev);
	ses = NULL;
	encini = NULL;
	/* Accept exactly one AES-CBC/AES-XTS initializer, nothing else. */
	for (; cri != NULL; cri = cri->cri_next) {
		switch (cri->cri_alg) {
		case CRYPTO_AES_CBC:
		case CRYPTO_AES_XTS:
			if (encini != NULL)
				return (EINVAL);
			encini = cri;
			break;
		default:
			return (EINVAL);
		}
	}
	if (encini == NULL)
		return (EINVAL);

	rw_wlock(&sc->lock);
	/*
	 * Free sessions goes first, so if first session is used, we need to
	 * allocate one.
	 */
	ses = TAILQ_FIRST(&sc->sessions);
	if (ses == NULL || ses->used) {
		/* No recyclable slot: allocate session + FPU context. */
		ses = malloc(sizeof(*ses), M_AESNI, M_NOWAIT | M_ZERO);
		if (ses == NULL) {
			rw_wunlock(&sc->lock);
			return (ENOMEM);
		}
		ses->fpu_ctx = fpu_kern_alloc_ctx(FPU_KERN_NORMAL |
		    FPU_KERN_NOWAIT);
		if (ses->fpu_ctx == NULL) {
			free(ses, M_AESNI);
			rw_wunlock(&sc->lock);
			return (ENOMEM);
		}
		ses->id = sc->sid++;
	} else {
		/* Recycle the free session at the head of the list. */
		TAILQ_REMOVE(&sc->sessions, ses, next);
	}
	ses->used = 1;
	/* Used sessions live at the tail (see aesni_freesession_locked). */
	TAILQ_INSERT_TAIL(&sc->sessions, ses, next);
	rw_wunlock(&sc->lock);
	ses->algo = encini->cri_alg;

	/* Expand the key schedules; undo the session on failure. */
	error = aesni_cipher_setup(ses, encini);
	if (error != 0) {
		rw_wlock(&sc->lock);
		aesni_freesession_locked(sc, ses);
		rw_wunlock(&sc->lock);
		return (error);
	}

	*sidp = ses->id;
	return (0);
}
207 
208 static void
209 aesni_freesession_locked(struct aesni_softc *sc, struct aesni_session *ses)
210 {
211 	struct fpu_kern_ctx *ctx;
212 	uint32_t sid;
213 
214 	sid = ses->id;
215 	TAILQ_REMOVE(&sc->sessions, ses, next);
216 	ctx = ses->fpu_ctx;
217 	bzero(ses, sizeof(*ses));
218 	ses->id = sid;
219 	ses->fpu_ctx = ctx;
220 	TAILQ_INSERT_HEAD(&sc->sessions, ses, next);
221 }
222 
223 static int
224 aesni_freesession(device_t dev, uint64_t tid)
225 {
226 	struct aesni_softc *sc;
227 	struct aesni_session *ses;
228 	uint32_t sid;
229 
230 	sc = device_get_softc(dev);
231 	sid = ((uint32_t)tid) & 0xffffffff;
232 	rw_wlock(&sc->lock);
233 	TAILQ_FOREACH_REVERSE(ses, &sc->sessions, aesni_sessions_head, next) {
234 		if (ses->id == sid)
235 			break;
236 	}
237 	if (ses == NULL) {
238 		rw_wunlock(&sc->lock);
239 		return (EINVAL);
240 	}
241 	aesni_freesession_locked(sc, ses);
242 	rw_wunlock(&sc->lock);
243 	return (0);
244 }
245 
246 static int
247 aesni_process(device_t dev, struct cryptop *crp, int hint __unused)
248 {
249 	struct aesni_softc *sc = device_get_softc(dev);
250 	struct aesni_session *ses = NULL;
251 	struct cryptodesc *crd, *enccrd;
252 	int error;
253 
254 	error = 0;
255 	enccrd = NULL;
256 
257 	/* Sanity check. */
258 	if (crp == NULL)
259 		return (EINVAL);
260 
261 	if (crp->crp_callback == NULL || crp->crp_desc == NULL) {
262 		error = EINVAL;
263 		goto out;
264 	}
265 
266 	for (crd = crp->crp_desc; crd != NULL; crd = crd->crd_next) {
267 		switch (crd->crd_alg) {
268 		case CRYPTO_AES_CBC:
269 		case CRYPTO_AES_XTS:
270 			if (enccrd != NULL) {
271 				error = EINVAL;
272 				goto out;
273 			}
274 			enccrd = crd;
275 			break;
276 		default:
277 			return (EINVAL);
278 		}
279 	}
280 	if (enccrd == NULL || (enccrd->crd_len % AES_BLOCK_LEN) != 0) {
281 		error = EINVAL;
282 		goto out;
283 	}
284 
285 	rw_rlock(&sc->lock);
286 	TAILQ_FOREACH_REVERSE(ses, &sc->sessions, aesni_sessions_head, next) {
287 		if (ses->id == (crp->crp_sid & 0xffffffff))
288 			break;
289 	}
290 	rw_runlock(&sc->lock);
291 	if (ses == NULL) {
292 		error = EINVAL;
293 		goto out;
294 	}
295 
296 	error = aesni_cipher_process(ses, enccrd, crp);
297 	if (error != 0)
298 		goto out;
299 
300 out:
301 	crp->crp_etype = error;
302 	crypto_done(crp);
303 	return (error);
304 }
305 
306 uint8_t *
307 aesni_cipher_alloc(struct cryptodesc *enccrd, struct cryptop *crp,
308     int *allocated)
309 {
310 	struct uio *uio;
311 	struct iovec *iov;
312 	uint8_t *addr;
313 
314 	if (crp->crp_flags & CRYPTO_F_IMBUF)
315 		goto alloc;
316 	else if (crp->crp_flags & CRYPTO_F_IOV) {
317 		uio = (struct uio *)crp->crp_buf;
318 		if (uio->uio_iovcnt != 1)
319 			goto alloc;
320 		iov = uio->uio_iov;
321 		addr = (u_char *)iov->iov_base + enccrd->crd_skip;
322 	} else
323 		addr = (u_char *)crp->crp_buf;
324 	*allocated = 0;
325 	return (addr);
326 
327 alloc:
328 	addr = malloc(enccrd->crd_len, M_AESNI, M_NOWAIT);
329 	if (addr != NULL) {
330 		*allocated = 1;
331 		crypto_copydata(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
332 		    enccrd->crd_len, addr);
333 	} else
334 		*allocated = 0;
335 	return (addr);
336 }
337 
/* Newbus device and cryptodev interface dispatch table. */
static device_method_t aesni_methods[] = {
	DEVMETHOD(device_identify, aesni_identify),
	DEVMETHOD(device_probe, aesni_probe),
	DEVMETHOD(device_attach, aesni_attach),
	DEVMETHOD(device_detach, aesni_detach),

	DEVMETHOD(cryptodev_newsession, aesni_newsession),
	DEVMETHOD(cryptodev_freesession, aesni_freesession),
	DEVMETHOD(cryptodev_process, aesni_process),

	{0, 0},
};

static driver_t aesni_driver = {
	"aesni",
	aesni_methods,
	sizeof(struct aesni_softc),
};
static devclass_t aesni_devclass;

/* Attach under nexus; requires the opencrypto framework. */
DRIVER_MODULE(aesni, nexus, aesni_driver, aesni_devclass, 0, 0);
MODULE_VERSION(aesni, 1);
MODULE_DEPEND(aesni, crypto, 1, 1, 1);
361 
362 static int
363 aesni_cipher_setup(struct aesni_session *ses, struct cryptoini *encini)
364 {
365 	struct thread *td;
366 	int error;
367 
368 	td = curthread;
369 	error = fpu_kern_enter(td, ses->fpu_ctx, FPU_KERN_NORMAL |
370 	    FPU_KERN_KTHR);
371 	if (error != 0)
372 		return (error);
373 	error = aesni_cipher_setup_common(ses, encini->cri_key,
374 	    encini->cri_klen);
375 	fpu_kern_leave(td, ses->fpu_ctx);
376 	return (error);
377 }
378 
/*
 * Run the encryption or decryption described by enccrd on the request
 * payload.  The data is staged into a contiguous buffer if necessary,
 * the SSE cipher kernels run inside an fpu_kern section, and the result
 * is copied back when a bounce buffer was used.
 *
 * Returns 0 or an errno value; the caller reports it via crp_etype.
 */
static int
aesni_cipher_process(struct aesni_session *ses, struct cryptodesc *enccrd,
    struct cryptop *crp)
{
	struct thread *td;
	uint8_t *buf;
	int error, allocated;

	/* Get a contiguous view of the payload (may allocate a copy). */
	buf = aesni_cipher_alloc(enccrd, crp, &allocated);
	if (buf == NULL)
		return (ENOMEM);

	td = curthread;
	error = fpu_kern_enter(td, ses->fpu_ctx, FPU_KERN_NORMAL |
	    FPU_KERN_KTHR);
	if (error != 0)
		goto out1;

	/* Per-request key override: re-expand the schedules. */
	if ((enccrd->crd_flags & CRD_F_KEY_EXPLICIT) != 0) {
		error = aesni_cipher_setup_common(ses, enccrd->crd_key,
		    enccrd->crd_klen);
		if (error != 0)
			goto out;
	}

	if ((enccrd->crd_flags & CRD_F_ENCRYPT) != 0) {
		/*
		 * Encrypt: take the IV from the descriptor if explicit;
		 * otherwise ses->iv is used as-is.  NOTE(review): where
		 * ses->iv originates in that case is not visible here —
		 * presumably set at session setup or carried over from the
		 * previous request; verify in aesni_cipher_setup_common().
		 */
		if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
			bcopy(enccrd->crd_iv, ses->iv, AES_BLOCK_LEN);
		/* Unless already present in the buffer, inject the IV. */
		if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    enccrd->crd_inject, AES_BLOCK_LEN, ses->iv);
		if (ses->algo == CRYPTO_AES_CBC) {
			aesni_encrypt_cbc(ses->rounds, ses->enc_schedule,
			    enccrd->crd_len, buf, buf, ses->iv);
		} else /* if (ses->algo == CRYPTO_AES_XTS) */ {
			aesni_encrypt_xts(ses->rounds, ses->enc_schedule,
			    ses->xts_schedule, enccrd->crd_len, buf, buf,
			    ses->iv);
		}
	} else {
		/* Decrypt: IV comes from the descriptor or the buffer. */
		if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
			bcopy(enccrd->crd_iv, ses->iv, AES_BLOCK_LEN);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    enccrd->crd_inject, AES_BLOCK_LEN, ses->iv);
		if (ses->algo == CRYPTO_AES_CBC) {
			/* CBC decryption operates in place on buf. */
			aesni_decrypt_cbc(ses->rounds, ses->dec_schedule,
			    enccrd->crd_len, buf, ses->iv);
		} else /* if (ses->algo == CRYPTO_AES_XTS) */ {
			aesni_decrypt_xts(ses->rounds, ses->dec_schedule,
			    ses->xts_schedule, enccrd->crd_len, buf, buf,
			    ses->iv);
		}
	}
	/* Copy the result back when a bounce buffer was used. */
	if (allocated)
		crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
		    enccrd->crd_len, buf);
	/*
	 * After encryption, save the last ciphertext block in ses->iv,
	 * chaining it into a subsequent request on this session.
	 */
	if ((enccrd->crd_flags & CRD_F_ENCRYPT) != 0)
		crypto_copydata(crp->crp_flags, crp->crp_buf,
		    enccrd->crd_skip + enccrd->crd_len - AES_BLOCK_LEN,
		    AES_BLOCK_LEN, ses->iv);
out:
	fpu_kern_leave(td, ses->fpu_ctx);
out1:
	/* Scrub the bounce buffer (may hold plaintext) before freeing. */
	if (allocated) {
		bzero(buf, enccrd->crd_len);
		free(buf, M_AESNI);
	}
	return (error);
}
449