16810ad6fSSam Leffler /*- 26810ad6fSSam Leffler * Copyright (c) 2002-2006 Sam Leffler. All rights reserved. 36810ad6fSSam Leffler * 46810ad6fSSam Leffler * Redistribution and use in source and binary forms, with or without 56810ad6fSSam Leffler * modification, are permitted provided that the following conditions 66810ad6fSSam Leffler * are met: 76810ad6fSSam Leffler * 1. Redistributions of source code must retain the above copyright 86810ad6fSSam Leffler * notice, this list of conditions and the following disclaimer. 96810ad6fSSam Leffler * 2. Redistributions in binary form must reproduce the above copyright 106810ad6fSSam Leffler * notice, this list of conditions and the following disclaimer in the 116810ad6fSSam Leffler * documentation and/or other materials provided with the distribution. 126810ad6fSSam Leffler * 136810ad6fSSam Leffler * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 146810ad6fSSam Leffler * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 156810ad6fSSam Leffler * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 166810ad6fSSam Leffler * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 176810ad6fSSam Leffler * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 186810ad6fSSam Leffler * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 196810ad6fSSam Leffler * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 206810ad6fSSam Leffler * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 216810ad6fSSam Leffler * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 226810ad6fSSam Leffler * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 236810ad6fSSam Leffler */ 246810ad6fSSam Leffler 256810ad6fSSam Leffler #include <sys/cdefs.h> 266810ad6fSSam Leffler __FBSDID("$FreeBSD$"); 276810ad6fSSam Leffler 286810ad6fSSam Leffler /* 296810ad6fSSam Leffler * Cryptographic Subsystem. 306810ad6fSSam Leffler * 316810ad6fSSam Leffler * This code is derived from the Openbsd Cryptographic Framework (OCF) 326810ad6fSSam Leffler * that has the copyright shown below. Very little of the original 336810ad6fSSam Leffler * code remains. 346810ad6fSSam Leffler */ 356810ad6fSSam Leffler 3660727d8bSWarner Losh /*- 37091d81d1SSam Leffler * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) 38091d81d1SSam Leffler * 39091d81d1SSam Leffler * This code was written by Angelos D. Keromytis in Athens, Greece, in 40091d81d1SSam Leffler * February 2000. Network Security Technologies Inc. (NSTI) kindly 41091d81d1SSam Leffler * supported the development of this code. 42091d81d1SSam Leffler * 43091d81d1SSam Leffler * Copyright (c) 2000, 2001 Angelos D. Keromytis 44091d81d1SSam Leffler * 45091d81d1SSam Leffler * Permission to use, copy, and modify this software with or without fee 46091d81d1SSam Leffler * is hereby granted, provided that this entire notice is included in 47091d81d1SSam Leffler * all source code copies of any software which is or includes a copy or 48091d81d1SSam Leffler * modification of this software. 49091d81d1SSam Leffler * 50091d81d1SSam Leffler * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR 51091d81d1SSam Leffler * IMPLIED WARRANTY. 
IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY 52091d81d1SSam Leffler * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE 53091d81d1SSam Leffler * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR 54091d81d1SSam Leffler * PURPOSE. 55091d81d1SSam Leffler */ 562c446514SDavid E. O'Brien 577d1853eeSSam Leffler #define CRYPTO_TIMING /* enable timing support */ 58091d81d1SSam Leffler 596810ad6fSSam Leffler #include "opt_ddb.h" 606810ad6fSSam Leffler 61091d81d1SSam Leffler #include <sys/param.h> 62091d81d1SSam Leffler #include <sys/systm.h> 63091d81d1SSam Leffler #include <sys/eventhandler.h> 64091d81d1SSam Leffler #include <sys/kernel.h> 65091d81d1SSam Leffler #include <sys/kthread.h> 66ec5c0e5bSAllan Jude #include <sys/linker.h> 67091d81d1SSam Leffler #include <sys/lock.h> 685dba30f1SPoul-Henning Kamp #include <sys/module.h> 69091d81d1SSam Leffler #include <sys/mutex.h> 70091d81d1SSam Leffler #include <sys/malloc.h> 71091d81d1SSam Leffler #include <sys/proc.h> 72df21ad6eSBjoern A. Zeeb #include <sys/sdt.h> 7339bbca6fSFabien Thomas #include <sys/smp.h> 74091d81d1SSam Leffler #include <sys/sysctl.h> 7539bbca6fSFabien Thomas #include <sys/taskqueue.h> 76091d81d1SSam Leffler 776810ad6fSSam Leffler #include <ddb/ddb.h> 786810ad6fSSam Leffler 79091d81d1SSam Leffler #include <vm/uma.h> 80ec5c0e5bSAllan Jude #include <crypto/intake.h> 81091d81d1SSam Leffler #include <opencrypto/cryptodev.h> 821a91ccccSSam Leffler #include <opencrypto/xform.h> /* XXX for M_XDATA */ 83091d81d1SSam Leffler 846810ad6fSSam Leffler #include <sys/kobj.h> 856810ad6fSSam Leffler #include <sys/bus.h> 866810ad6fSSam Leffler #include "cryptodev_if.h" 876810ad6fSSam Leffler 886ed982a2SAndrew Turner #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) 8904c49e68SKonstantin Belousov #include <machine/pcb.h> 9004c49e68SKonstantin Belousov #endif 9104c49e68SKonstantin Belousov 92df21ad6eSBjoern A. Zeeb SDT_PROVIDER_DEFINE(opencrypto); 93df21ad6eSBjoern A. Zeeb 94091d81d1SSam Leffler /* 95091d81d1SSam Leffler * Crypto drivers register themselves by allocating a slot in the 96091d81d1SSam Leffler * crypto_drivers table with crypto_get_driverid() and then registering 97091d81d1SSam Leffler * each algorithm they support with crypto_register() and crypto_kregister(). 98091d81d1SSam Leffler */ 99091d81d1SSam Leffler static struct mtx crypto_drivers_mtx; /* lock on driver table */ 100091d81d1SSam Leffler #define CRYPTO_DRIVER_LOCK() mtx_lock(&crypto_drivers_mtx) 101091d81d1SSam Leffler #define CRYPTO_DRIVER_UNLOCK() mtx_unlock(&crypto_drivers_mtx) 1026810ad6fSSam Leffler #define CRYPTO_DRIVER_ASSERT() mtx_assert(&crypto_drivers_mtx, MA_OWNED) 1036810ad6fSSam Leffler 1046810ad6fSSam Leffler /* 1056810ad6fSSam Leffler * Crypto device/driver capabilities structure. 1066810ad6fSSam Leffler * 1076810ad6fSSam Leffler * Synchronization: 1086810ad6fSSam Leffler * (d) - protected by CRYPTO_DRIVER_LOCK() 1096810ad6fSSam Leffler * (q) - protected by CRYPTO_Q_LOCK() 1106810ad6fSSam Leffler * Not tagged fields are read-only. 1116810ad6fSSam Leffler */ 1126810ad6fSSam Leffler struct cryptocap { 1136810ad6fSSam Leffler device_t cc_dev; /* (d) device/driver */ 1146810ad6fSSam Leffler u_int32_t cc_sessions; /* (d) # of sessions */ 1156810ad6fSSam Leffler u_int32_t cc_koperations; /* (d) # os asym operations */ 1166810ad6fSSam Leffler /* 1176810ad6fSSam Leffler * Largest possible operator length (in bits) for each type of 1186810ad6fSSam Leffler * encryption algorithm. 
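 * (A driver supplies this value through the maxoplen argument of
 * crypto_register(), which stores it in cc_max_op_len[alg] below.)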
XXX not used 1196810ad6fSSam Leffler */ 1206810ad6fSSam Leffler u_int16_t cc_max_op_len[CRYPTO_ALGORITHM_MAX + 1]; 1216810ad6fSSam Leffler u_int8_t cc_alg[CRYPTO_ALGORITHM_MAX + 1]; 1226810ad6fSSam Leffler u_int8_t cc_kalg[CRK_ALGORITHM_MAX + 1]; 1236810ad6fSSam Leffler 1246810ad6fSSam Leffler int cc_flags; /* (d) flags */ 1256810ad6fSSam Leffler #define CRYPTOCAP_F_CLEANUP 0x80000000 /* needs resource cleanup */ 1266810ad6fSSam Leffler int cc_qblocked; /* (q) symmetric q blocked */ 1276810ad6fSSam Leffler int cc_kqblocked; /* (q) asymmetric q blocked */ 1286810ad6fSSam Leffler }; 129091d81d1SSam Leffler static struct cryptocap *crypto_drivers = NULL; 130091d81d1SSam Leffler static int crypto_drivers_num = 0; 131091d81d1SSam Leffler 132091d81d1SSam Leffler /* 133091d81d1SSam Leffler * There are two queues for crypto requests; one for symmetric (e.g. 134091d81d1SSam Leffler * cipher) operations and one for asymmetric (e.g. MOD)operations. 135091d81d1SSam Leffler * A single mutex is used to lock access to both queues. We could 136091d81d1SSam Leffler * have one per-queue but having one simplifies handling of block/unblock 137091d81d1SSam Leffler * operations. 138091d81d1SSam Leffler */ 1393a865c82SPawel Jakub Dawidek static int crp_sleep = 0; 14039bbca6fSFabien Thomas static TAILQ_HEAD(cryptop_q ,cryptop) crp_q; /* request queues */ 141091d81d1SSam Leffler static TAILQ_HEAD(,cryptkop) crp_kq; 142091d81d1SSam Leffler static struct mtx crypto_q_mtx; 143091d81d1SSam Leffler #define CRYPTO_Q_LOCK() mtx_lock(&crypto_q_mtx) 144091d81d1SSam Leffler #define CRYPTO_Q_UNLOCK() mtx_unlock(&crypto_q_mtx) 145091d81d1SSam Leffler 146091d81d1SSam Leffler /* 14739bbca6fSFabien Thomas * Taskqueue used to dispatch the crypto requests 14839bbca6fSFabien Thomas * that have the CRYPTO_F_ASYNC flag 149091d81d1SSam Leffler */ 15039bbca6fSFabien Thomas static struct taskqueue *crypto_tq; 15139bbca6fSFabien Thomas 15239bbca6fSFabien Thomas /* 15339bbca6fSFabien Thomas * Crypto seq numbers are operated on with modular arithmetic 15439bbca6fSFabien Thomas */ 15539bbca6fSFabien Thomas #define CRYPTO_SEQ_GT(a,b) ((int)((a)-(b)) > 0) 15639bbca6fSFabien Thomas 15739bbca6fSFabien Thomas struct crypto_ret_worker { 15839bbca6fSFabien Thomas struct mtx crypto_ret_mtx; 15939bbca6fSFabien Thomas 16039bbca6fSFabien Thomas TAILQ_HEAD(,cryptop) crp_ordered_ret_q; /* ordered callback queue for symetric jobs */ 16139bbca6fSFabien Thomas TAILQ_HEAD(,cryptop) crp_ret_q; /* callback queue for symetric jobs */ 16239bbca6fSFabien Thomas TAILQ_HEAD(,cryptkop) crp_ret_kq; /* callback queue for asym jobs */ 16339bbca6fSFabien Thomas 16439bbca6fSFabien Thomas u_int32_t reorder_ops; /* total ordered sym jobs received */ 16539bbca6fSFabien Thomas u_int32_t reorder_cur_seq; /* current sym job dispatched */ 16639bbca6fSFabien Thomas 16739bbca6fSFabien Thomas struct proc *cryptoretproc; 16839bbca6fSFabien Thomas }; 16939bbca6fSFabien Thomas static struct crypto_ret_worker *crypto_ret_workers = NULL; 17039bbca6fSFabien Thomas 17139bbca6fSFabien Thomas #define CRYPTO_RETW(i) (&crypto_ret_workers[i]) 17239bbca6fSFabien Thomas #define CRYPTO_RETW_ID(w) ((w) - crypto_ret_workers) 17339bbca6fSFabien Thomas #define FOREACH_CRYPTO_RETW(w) \ 17439bbca6fSFabien Thomas for (w = crypto_ret_workers; w < crypto_ret_workers + crypto_workers_num; ++w) 17539bbca6fSFabien Thomas 17639bbca6fSFabien Thomas #define CRYPTO_RETW_LOCK(w) mtx_lock(&w->crypto_ret_mtx) 17739bbca6fSFabien Thomas #define CRYPTO_RETW_UNLOCK(w) mtx_unlock(&w->crypto_ret_mtx) 17839bbca6fSFabien 
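/*
 * Illustrative note: CRYPTO_SEQ_GT() above uses the usual modular
 * comparison so job ordering survives 32-bit wraparound of the
 * sequence counter.  For example, with a = 0x00000002 and
 * b = 0xfffffffe, (int)(a - b) == 4 > 0, so a is correctly treated
 * as the more recent sequence number.
 */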
Thomas #define CRYPTO_RETW_EMPTY(w) \ 17939bbca6fSFabien Thomas (TAILQ_EMPTY(&w->crp_ret_q) && TAILQ_EMPTY(&w->crp_ret_kq) && TAILQ_EMPTY(&w->crp_ordered_ret_q)) 18039bbca6fSFabien Thomas 18139bbca6fSFabien Thomas static int crypto_workers_num = 0; 18239bbca6fSFabien Thomas SYSCTL_INT(_kern, OID_AUTO, crypto_workers_num, CTLFLAG_RDTUN, 18339bbca6fSFabien Thomas &crypto_workers_num, 0, 18439bbca6fSFabien Thomas "Number of crypto workers used to dispatch crypto jobs"); 185091d81d1SSam Leffler 186091d81d1SSam Leffler static uma_zone_t cryptop_zone; 187091d81d1SSam Leffler static uma_zone_t cryptodesc_zone; 188091d81d1SSam Leffler 189091d81d1SSam Leffler int crypto_userasymcrypto = 1; /* userland may do asym crypto reqs */ 190091d81d1SSam Leffler SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW, 191091d81d1SSam Leffler &crypto_userasymcrypto, 0, 192091d81d1SSam Leffler "Enable/disable user-mode access to asymmetric crypto support"); 1936c20d7a3SJohn-Mark Gurney int crypto_devallowsoft = 0; /* only use hardware crypto */ 194091d81d1SSam Leffler SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW, 195091d81d1SSam Leffler &crypto_devallowsoft, 0, 1966c20d7a3SJohn-Mark Gurney "Enable/disable use of software crypto by /dev/crypto"); 197091d81d1SSam Leffler 198091d81d1SSam Leffler MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records"); 199091d81d1SSam Leffler 20051e45326SSam Leffler static void crypto_proc(void); 20151e45326SSam Leffler static struct proc *cryptoproc; 20239bbca6fSFabien Thomas static void crypto_ret_proc(struct crypto_ret_worker *ret_worker); 20351e45326SSam Leffler static void crypto_destroy(void); 2044acae0acSPawel Jakub Dawidek static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint); 2056810ad6fSSam Leffler static int crypto_kinvoke(struct cryptkop *krp, int flags); 20639bbca6fSFabien Thomas static void crypto_task_invoke(void *ctx, int pending); 20739bbca6fSFabien Thomas static void crypto_batch_enqueue(struct cryptop *crp); 20851e45326SSam Leffler 2097d1853eeSSam Leffler static struct cryptostats cryptostats; 2107d1853eeSSam Leffler SYSCTL_STRUCT(_kern, OID_AUTO, crypto_stats, CTLFLAG_RW, &cryptostats, 2117d1853eeSSam Leffler cryptostats, "Crypto system statistics"); 2127d1853eeSSam Leffler 2137d1853eeSSam Leffler #ifdef CRYPTO_TIMING 2147d1853eeSSam Leffler static int crypto_timing = 0; 2157d1853eeSSam Leffler SYSCTL_INT(_debug, OID_AUTO, crypto_timing, CTLFLAG_RW, 2167d1853eeSSam Leffler &crypto_timing, 0, "Enable/disable crypto timing support"); 2177d1853eeSSam Leffler #endif 2187d1853eeSSam Leffler 219ec5c0e5bSAllan Jude /* Try to avoid directly exposing the key buffer as a symbol */ 220ec5c0e5bSAllan Jude static struct keybuf *keybuf; 221ec5c0e5bSAllan Jude 222ec5c0e5bSAllan Jude static struct keybuf empty_keybuf = { 223ec5c0e5bSAllan Jude .kb_nents = 0 224ec5c0e5bSAllan Jude }; 225ec5c0e5bSAllan Jude 226ec5c0e5bSAllan Jude /* Obtain the key buffer from boot metadata */ 227ec5c0e5bSAllan Jude static void 228ec5c0e5bSAllan Jude keybuf_init(void) 229ec5c0e5bSAllan Jude { 230ec5c0e5bSAllan Jude caddr_t kmdp; 231ec5c0e5bSAllan Jude 232ec5c0e5bSAllan Jude kmdp = preload_search_by_type("elf kernel"); 233ec5c0e5bSAllan Jude 234ec5c0e5bSAllan Jude if (kmdp == NULL) 235ec5c0e5bSAllan Jude kmdp = preload_search_by_type("elf64 kernel"); 236ec5c0e5bSAllan Jude 237ec5c0e5bSAllan Jude keybuf = (struct keybuf *)preload_search_info(kmdp, 238ec5c0e5bSAllan Jude MODINFO_METADATA | MODINFOMD_KEYBUF); 239ec5c0e5bSAllan Jude 240ec5c0e5bSAllan Jude 
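	/*
	 * If the loader did not hand us a key buffer, point at the
	 * static empty one so callers of get_keybuf() never see NULL.
	 */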
if (keybuf == NULL) 241ec5c0e5bSAllan Jude keybuf = &empty_keybuf; 242ec5c0e5bSAllan Jude } 243ec5c0e5bSAllan Jude 244ec5c0e5bSAllan Jude /* It'd be nice if we could store these in some kind of secure memory... */ 245ec5c0e5bSAllan Jude struct keybuf * get_keybuf(void) { 246ec5c0e5bSAllan Jude 247ec5c0e5bSAllan Jude return (keybuf); 248ec5c0e5bSAllan Jude } 249ec5c0e5bSAllan Jude 25051e45326SSam Leffler static int 251091d81d1SSam Leffler crypto_init(void) 252091d81d1SSam Leffler { 25339bbca6fSFabien Thomas struct crypto_ret_worker *ret_worker; 25451e45326SSam Leffler int error; 255091d81d1SSam Leffler 2563569ae7fSSam Leffler mtx_init(&crypto_drivers_mtx, "crypto", "crypto driver table", 2573569ae7fSSam Leffler MTX_DEF|MTX_QUIET); 258091d81d1SSam Leffler 259091d81d1SSam Leffler TAILQ_INIT(&crp_q); 260091d81d1SSam Leffler TAILQ_INIT(&crp_kq); 2613569ae7fSSam Leffler mtx_init(&crypto_q_mtx, "crypto", "crypto op queues", MTX_DEF); 262091d81d1SSam Leffler 26351e45326SSam Leffler cryptop_zone = uma_zcreate("cryptop", sizeof (struct cryptop), 26451e45326SSam Leffler 0, 0, 0, 0, 26551e45326SSam Leffler UMA_ALIGN_PTR, UMA_ZONE_ZINIT); 26651e45326SSam Leffler cryptodesc_zone = uma_zcreate("cryptodesc", sizeof (struct cryptodesc), 26751e45326SSam Leffler 0, 0, 0, 0, 26851e45326SSam Leffler UMA_ALIGN_PTR, UMA_ZONE_ZINIT); 26951e45326SSam Leffler if (cryptodesc_zone == NULL || cryptop_zone == NULL) { 27051e45326SSam Leffler printf("crypto_init: cannot setup crypto zones\n"); 27151e45326SSam Leffler error = ENOMEM; 27251e45326SSam Leffler goto bad; 27351e45326SSam Leffler } 27451e45326SSam Leffler 27551e45326SSam Leffler crypto_drivers_num = CRYPTO_DRIVERS_INITIAL; 27651e45326SSam Leffler crypto_drivers = malloc(crypto_drivers_num * 27751e45326SSam Leffler sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO); 27851e45326SSam Leffler if (crypto_drivers == NULL) { 27951e45326SSam Leffler printf("crypto_init: cannot setup crypto drivers\n"); 28051e45326SSam Leffler error = ENOMEM; 28151e45326SSam Leffler goto bad; 28251e45326SSam Leffler } 28351e45326SSam Leffler 28439bbca6fSFabien Thomas if (crypto_workers_num < 1 || crypto_workers_num > mp_ncpus) 28539bbca6fSFabien Thomas crypto_workers_num = mp_ncpus; 28639bbca6fSFabien Thomas 28739bbca6fSFabien Thomas crypto_tq = taskqueue_create("crypto", M_WAITOK|M_ZERO, 28839bbca6fSFabien Thomas taskqueue_thread_enqueue, &crypto_tq); 28939bbca6fSFabien Thomas if (crypto_tq == NULL) { 29039bbca6fSFabien Thomas printf("crypto init: cannot setup crypto taskqueue\n"); 29139bbca6fSFabien Thomas error = ENOMEM; 29239bbca6fSFabien Thomas goto bad; 29339bbca6fSFabien Thomas } 29439bbca6fSFabien Thomas 29539bbca6fSFabien Thomas taskqueue_start_threads(&crypto_tq, crypto_workers_num, PRI_MIN_KERN, 29639bbca6fSFabien Thomas "crypto"); 29739bbca6fSFabien Thomas 2983745c395SJulian Elischer error = kproc_create((void (*)(void *)) crypto_proc, NULL, 29951e45326SSam Leffler &cryptoproc, 0, 0, "crypto"); 30051e45326SSam Leffler if (error) { 30151e45326SSam Leffler printf("crypto_init: cannot start crypto thread; error %d", 30251e45326SSam Leffler error); 30351e45326SSam Leffler goto bad; 30451e45326SSam Leffler } 30551e45326SSam Leffler 30639bbca6fSFabien Thomas crypto_ret_workers = malloc(crypto_workers_num * sizeof(struct crypto_ret_worker), 30739bbca6fSFabien Thomas M_CRYPTO_DATA, M_NOWAIT|M_ZERO); 30839bbca6fSFabien Thomas if (crypto_ret_workers == NULL) { 30939bbca6fSFabien Thomas error = ENOMEM; 31039bbca6fSFabien Thomas printf("crypto_init: cannot allocate ret 
workers\n"); 31139bbca6fSFabien Thomas goto bad; 31239bbca6fSFabien Thomas } 31339bbca6fSFabien Thomas 31439bbca6fSFabien Thomas 31539bbca6fSFabien Thomas FOREACH_CRYPTO_RETW(ret_worker) { 31639bbca6fSFabien Thomas TAILQ_INIT(&ret_worker->crp_ordered_ret_q); 31739bbca6fSFabien Thomas TAILQ_INIT(&ret_worker->crp_ret_q); 31839bbca6fSFabien Thomas TAILQ_INIT(&ret_worker->crp_ret_kq); 31939bbca6fSFabien Thomas 32039bbca6fSFabien Thomas ret_worker->reorder_ops = 0; 32139bbca6fSFabien Thomas ret_worker->reorder_cur_seq = 0; 32239bbca6fSFabien Thomas 32339bbca6fSFabien Thomas mtx_init(&ret_worker->crypto_ret_mtx, "crypto", "crypto return queues", MTX_DEF); 32439bbca6fSFabien Thomas 32539bbca6fSFabien Thomas error = kproc_create((void (*)(void *)) crypto_ret_proc, ret_worker, 32639bbca6fSFabien Thomas &ret_worker->cryptoretproc, 0, 0, "crypto returns %td", CRYPTO_RETW_ID(ret_worker)); 32751e45326SSam Leffler if (error) { 32851e45326SSam Leffler printf("crypto_init: cannot start cryptoret thread; error %d", 32951e45326SSam Leffler error); 33051e45326SSam Leffler goto bad; 33151e45326SSam Leffler } 33239bbca6fSFabien Thomas } 333ec5c0e5bSAllan Jude 334ec5c0e5bSAllan Jude keybuf_init(); 335ec5c0e5bSAllan Jude 33651e45326SSam Leffler return 0; 33751e45326SSam Leffler bad: 33851e45326SSam Leffler crypto_destroy(); 33951e45326SSam Leffler return error; 34051e45326SSam Leffler } 34151e45326SSam Leffler 34251e45326SSam Leffler /* 34351e45326SSam Leffler * Signal a crypto thread to terminate. We use the driver 34451e45326SSam Leffler * table lock to synchronize the sleep/wakeups so that we 34551e45326SSam Leffler * are sure the threads have terminated before we release 34651e45326SSam Leffler * the data structures they use. See crypto_finis below 34751e45326SSam Leffler * for the other half of this song-and-dance. 34851e45326SSam Leffler */ 34951e45326SSam Leffler static void 35051e45326SSam Leffler crypto_terminate(struct proc **pp, void *q) 35151e45326SSam Leffler { 35251e45326SSam Leffler struct proc *p; 35351e45326SSam Leffler 35451e45326SSam Leffler mtx_assert(&crypto_drivers_mtx, MA_OWNED); 35551e45326SSam Leffler p = *pp; 35651e45326SSam Leffler *pp = NULL; 35751e45326SSam Leffler if (p) { 35851e45326SSam Leffler wakeup_one(q); 35951e45326SSam Leffler PROC_LOCK(p); /* NB: insure we don't miss wakeup */ 36051e45326SSam Leffler CRYPTO_DRIVER_UNLOCK(); /* let crypto_finis progress */ 36151e45326SSam Leffler msleep(p, &p->p_mtx, PWAIT, "crypto_destroy", 0); 36251e45326SSam Leffler PROC_UNLOCK(p); 36351e45326SSam Leffler CRYPTO_DRIVER_LOCK(); 36451e45326SSam Leffler } 36551e45326SSam Leffler } 36651e45326SSam Leffler 36751e45326SSam Leffler static void 36851e45326SSam Leffler crypto_destroy(void) 36951e45326SSam Leffler { 37039bbca6fSFabien Thomas struct crypto_ret_worker *ret_worker; 37139bbca6fSFabien Thomas 37251e45326SSam Leffler /* 37351e45326SSam Leffler * Terminate any crypto threads. 37451e45326SSam Leffler */ 37539bbca6fSFabien Thomas if (crypto_tq != NULL) 37639bbca6fSFabien Thomas taskqueue_drain_all(crypto_tq); 37751e45326SSam Leffler CRYPTO_DRIVER_LOCK(); 37851e45326SSam Leffler crypto_terminate(&cryptoproc, &crp_q); 37939bbca6fSFabien Thomas FOREACH_CRYPTO_RETW(ret_worker) 38039bbca6fSFabien Thomas crypto_terminate(&ret_worker->cryptoretproc, &ret_worker->crp_ret_q); 38151e45326SSam Leffler CRYPTO_DRIVER_UNLOCK(); 38251e45326SSam Leffler 38351e45326SSam Leffler /* XXX flush queues??? 
*/ 38451e45326SSam Leffler 38551e45326SSam Leffler /* 38651e45326SSam Leffler * Reclaim dynamically allocated resources. 38751e45326SSam Leffler */ 38851e45326SSam Leffler if (crypto_drivers != NULL) 38951e45326SSam Leffler free(crypto_drivers, M_CRYPTO_DATA); 39051e45326SSam Leffler 39151e45326SSam Leffler if (cryptodesc_zone != NULL) 39251e45326SSam Leffler uma_zdestroy(cryptodesc_zone); 39351e45326SSam Leffler if (cryptop_zone != NULL) 39451e45326SSam Leffler uma_zdestroy(cryptop_zone); 39551e45326SSam Leffler mtx_destroy(&crypto_q_mtx); 39639bbca6fSFabien Thomas FOREACH_CRYPTO_RETW(ret_worker) 39739bbca6fSFabien Thomas mtx_destroy(&ret_worker->crypto_ret_mtx); 39839bbca6fSFabien Thomas free(crypto_ret_workers, M_CRYPTO_DATA); 39939bbca6fSFabien Thomas if (crypto_tq != NULL) 40039bbca6fSFabien Thomas taskqueue_free(crypto_tq); 40151e45326SSam Leffler mtx_destroy(&crypto_drivers_mtx); 402091d81d1SSam Leffler } 403f544a528SMark Murray 4046810ad6fSSam Leffler static struct cryptocap * 4056810ad6fSSam Leffler crypto_checkdriver(u_int32_t hid) 4066810ad6fSSam Leffler { 4076810ad6fSSam Leffler if (crypto_drivers == NULL) 4086810ad6fSSam Leffler return NULL; 4096810ad6fSSam Leffler return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]); 4106810ad6fSSam Leffler } 4116810ad6fSSam Leffler 412f544a528SMark Murray /* 4136810ad6fSSam Leffler * Compare a driver's list of supported algorithms against another 4146810ad6fSSam Leffler * list; return non-zero if all algorithms are supported. 415f544a528SMark Murray */ 416f544a528SMark Murray static int 4176810ad6fSSam Leffler driver_suitable(const struct cryptocap *cap, const struct cryptoini *cri) 418f544a528SMark Murray { 4196810ad6fSSam Leffler const struct cryptoini *cr; 42051e45326SSam Leffler 4216810ad6fSSam Leffler /* See if all the algorithms are supported. */ 4226810ad6fSSam Leffler for (cr = cri; cr; cr = cr->cri_next) 4236810ad6fSSam Leffler if (cap->cc_alg[cr->cri_alg] == 0) 424f544a528SMark Murray return 0; 4256810ad6fSSam Leffler return 1; 426f544a528SMark Murray } 427f544a528SMark Murray 428091d81d1SSam Leffler /* 4296810ad6fSSam Leffler * Select a driver for a new session that supports the specified 4306810ad6fSSam Leffler * algorithms and, optionally, is constrained according to the flags. 431091d81d1SSam Leffler * The algorithm we use here is pretty stupid; just use the 4326810ad6fSSam Leffler * first driver that supports all the algorithms we need. If there 4336810ad6fSSam Leffler * are multiple drivers we choose the driver with the fewest active 4346810ad6fSSam Leffler * sessions. We prefer hardware-backed drivers to software ones. 435091d81d1SSam Leffler * 436091d81d1SSam Leffler * XXX We need more smarts here (in real life too, but that's 437091d81d1SSam Leffler * XXX another story altogether). 438091d81d1SSam Leffler */ 4396810ad6fSSam Leffler static struct cryptocap * 4406810ad6fSSam Leffler crypto_select_driver(const struct cryptoini *cri, int flags) 4416810ad6fSSam Leffler { 4426810ad6fSSam Leffler struct cryptocap *cap, *best; 4436810ad6fSSam Leffler int match, hid; 4446810ad6fSSam Leffler 4456810ad6fSSam Leffler CRYPTO_DRIVER_ASSERT(); 446091d81d1SSam Leffler 447694e0113SPawel Jakub Dawidek /* 4486810ad6fSSam Leffler * Look first for hardware crypto devices if permitted. 
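 * If no hardware driver qualifies and the caller also allowed
 * CRYPTOCAP_F_SOFTWARE, the scan below is simply repeated for the
 * software class.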
449694e0113SPawel Jakub Dawidek */ 4506810ad6fSSam Leffler if (flags & CRYPTOCAP_F_HARDWARE) 4516810ad6fSSam Leffler match = CRYPTOCAP_F_HARDWARE; 4526810ad6fSSam Leffler else 4536810ad6fSSam Leffler match = CRYPTOCAP_F_SOFTWARE; 4546810ad6fSSam Leffler best = NULL; 4556810ad6fSSam Leffler again: 456091d81d1SSam Leffler for (hid = 0; hid < crypto_drivers_num; hid++) { 457694e0113SPawel Jakub Dawidek cap = &crypto_drivers[hid]; 458091d81d1SSam Leffler /* 4596810ad6fSSam Leffler * If it's not initialized, is in the process of 4606810ad6fSSam Leffler * going away, or is not appropriate (hardware 4616810ad6fSSam Leffler * or software based on match), then skip. 462091d81d1SSam Leffler */ 4636810ad6fSSam Leffler if (cap->cc_dev == NULL || 4646810ad6fSSam Leffler (cap->cc_flags & CRYPTOCAP_F_CLEANUP) || 4656810ad6fSSam Leffler (cap->cc_flags & match) == 0) 466091d81d1SSam Leffler continue; 467091d81d1SSam Leffler 4686810ad6fSSam Leffler /* verify all the algorithms are supported. */ 4696810ad6fSSam Leffler if (driver_suitable(cap, cri)) { 4706810ad6fSSam Leffler if (best == NULL || 4716810ad6fSSam Leffler cap->cc_sessions < best->cc_sessions) 4726810ad6fSSam Leffler best = cap; 4736810ad6fSSam Leffler } 4746810ad6fSSam Leffler } 47508fca7a5SJohn-Mark Gurney if (best == NULL && match == CRYPTOCAP_F_HARDWARE && 47608fca7a5SJohn-Mark Gurney (flags & CRYPTOCAP_F_SOFTWARE)) { 4776810ad6fSSam Leffler /* sort of an Algol 68-style for loop */ 4786810ad6fSSam Leffler match = CRYPTOCAP_F_SOFTWARE; 4796810ad6fSSam Leffler goto again; 4806810ad6fSSam Leffler } 4816810ad6fSSam Leffler return best; 4826810ad6fSSam Leffler } 483091d81d1SSam Leffler 484694e0113SPawel Jakub Dawidek /* 4856810ad6fSSam Leffler * Create a new session. The crid argument specifies a crypto 4866810ad6fSSam Leffler * driver to use or constraints on a driver to select (hardware 4876810ad6fSSam Leffler * only, software only, either). Whatever driver is selected 4886810ad6fSSam Leffler * must be capable of the requested crypto algorithms. 489694e0113SPawel Jakub Dawidek */ 4906810ad6fSSam Leffler int 4916810ad6fSSam Leffler crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int crid) 4926810ad6fSSam Leffler { 4936810ad6fSSam Leffler struct cryptocap *cap; 4946810ad6fSSam Leffler u_int32_t hid, lid; 4956810ad6fSSam Leffler int err; 4966810ad6fSSam Leffler 4976810ad6fSSam Leffler CRYPTO_DRIVER_LOCK(); 4986810ad6fSSam Leffler if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) { 499694e0113SPawel Jakub Dawidek /* 5006810ad6fSSam Leffler * Use specified driver; verify it is capable. 501694e0113SPawel Jakub Dawidek */ 5026810ad6fSSam Leffler cap = crypto_checkdriver(crid); 5036810ad6fSSam Leffler if (cap != NULL && !driver_suitable(cap, cri)) 504694e0113SPawel Jakub Dawidek cap = NULL; 5056810ad6fSSam Leffler } else { 5066810ad6fSSam Leffler /* 5076810ad6fSSam Leffler * No requested driver; select based on crid flags. 5086810ad6fSSam Leffler */ 5096810ad6fSSam Leffler cap = crypto_select_driver(cri, crid); 5106810ad6fSSam Leffler /* 5116810ad6fSSam Leffler * if NULL then can't do everything in one session. 5126810ad6fSSam Leffler * XXX Fix this. We need to inject a "virtual" session 5136810ad6fSSam Leffler * XXX layer right about here. 5146810ad6fSSam Leffler */ 515694e0113SPawel Jakub Dawidek } 516694e0113SPawel Jakub Dawidek if (cap != NULL) { 517091d81d1SSam Leffler /* Call the driver initialization routine. */ 5186810ad6fSSam Leffler hid = cap - crypto_drivers; 519091d81d1SSam Leffler lid = hid; /* Pass the driver ID. 
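 * The 64-bit session id handed back to the caller is assembled just
 * below: the upper word carries the driver's capability byte and its
 * table index (hid), the lower word carries the driver-local session
 * id (lid).  CRYPTO_SESID2HID() later recovers hid from it.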
*/ 5206810ad6fSSam Leffler err = CRYPTODEV_NEWSESSION(cap->cc_dev, &lid, cri); 521091d81d1SSam Leffler if (err == 0) { 5226810ad6fSSam Leffler (*sid) = (cap->cc_flags & 0xff000000) 5236810ad6fSSam Leffler | (hid & 0x00ffffff); 524091d81d1SSam Leffler (*sid) <<= 32; 525091d81d1SSam Leffler (*sid) |= (lid & 0xffffffff); 52607d0c94aSSam Leffler cap->cc_sessions++; 5276810ad6fSSam Leffler } else 52808fca7a5SJohn-Mark Gurney CRYPTDEB("dev newsession failed"); 52908fca7a5SJohn-Mark Gurney } else { 53008fca7a5SJohn-Mark Gurney CRYPTDEB("no driver"); 531a317fb03SConrad Meyer err = EOPNOTSUPP; 53208fca7a5SJohn-Mark Gurney } 533091d81d1SSam Leffler CRYPTO_DRIVER_UNLOCK(); 534091d81d1SSam Leffler return err; 535091d81d1SSam Leffler } 536091d81d1SSam Leffler 5374acae0acSPawel Jakub Dawidek static void 5384acae0acSPawel Jakub Dawidek crypto_remove(struct cryptocap *cap) 5394acae0acSPawel Jakub Dawidek { 5404acae0acSPawel Jakub Dawidek 5414acae0acSPawel Jakub Dawidek mtx_assert(&crypto_drivers_mtx, MA_OWNED); 5424acae0acSPawel Jakub Dawidek if (cap->cc_sessions == 0 && cap->cc_koperations == 0) 5434acae0acSPawel Jakub Dawidek bzero(cap, sizeof(*cap)); 5444acae0acSPawel Jakub Dawidek } 5454acae0acSPawel Jakub Dawidek 546091d81d1SSam Leffler /* 547091d81d1SSam Leffler * Delete an existing session (or a reserved session on an unregistered 548091d81d1SSam Leffler * driver). 549091d81d1SSam Leffler */ 550091d81d1SSam Leffler int 551091d81d1SSam Leffler crypto_freesession(u_int64_t sid) 552091d81d1SSam Leffler { 5534acae0acSPawel Jakub Dawidek struct cryptocap *cap; 554091d81d1SSam Leffler u_int32_t hid; 555091d81d1SSam Leffler int err; 556091d81d1SSam Leffler 557091d81d1SSam Leffler CRYPTO_DRIVER_LOCK(); 558091d81d1SSam Leffler 559091d81d1SSam Leffler if (crypto_drivers == NULL) { 560091d81d1SSam Leffler err = EINVAL; 561091d81d1SSam Leffler goto done; 562091d81d1SSam Leffler } 563091d81d1SSam Leffler 564091d81d1SSam Leffler /* Determine two IDs. */ 56507d0c94aSSam Leffler hid = CRYPTO_SESID2HID(sid); 566091d81d1SSam Leffler 567091d81d1SSam Leffler if (hid >= crypto_drivers_num) { 568091d81d1SSam Leffler err = ENOENT; 569091d81d1SSam Leffler goto done; 570091d81d1SSam Leffler } 5714acae0acSPawel Jakub Dawidek cap = &crypto_drivers[hid]; 572091d81d1SSam Leffler 5734acae0acSPawel Jakub Dawidek if (cap->cc_sessions) 5744acae0acSPawel Jakub Dawidek cap->cc_sessions--; 575091d81d1SSam Leffler 576091d81d1SSam Leffler /* Call the driver cleanup routine, if available. */ 5776810ad6fSSam Leffler err = CRYPTODEV_FREESESSION(cap->cc_dev, sid); 578091d81d1SSam Leffler 5794acae0acSPawel Jakub Dawidek if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) 5804acae0acSPawel Jakub Dawidek crypto_remove(cap); 581091d81d1SSam Leffler 582091d81d1SSam Leffler done: 583091d81d1SSam Leffler CRYPTO_DRIVER_UNLOCK(); 584091d81d1SSam Leffler return err; 585091d81d1SSam Leffler } 586091d81d1SSam Leffler 587091d81d1SSam Leffler /* 588091d81d1SSam Leffler * Return an unused driver id. Used by drivers prior to registering 589091d81d1SSam Leffler * support for the algorithms they handle. 
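 *
 * A minimal attach-time sketch (illustrative only; "sc_cid" and the
 * algorithm constants are example choices, not taken from this file):
 *
 *	sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
 *	if (sc->sc_cid < 0)
 *		return (ENXIO);
 *	crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
 *	crypto_register(sc->sc_cid, CRYPTO_SHA2_256_HMAC, 0, 0);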
590091d81d1SSam Leffler */ 591091d81d1SSam Leffler int32_t 5926810ad6fSSam Leffler crypto_get_driverid(device_t dev, int flags) 593091d81d1SSam Leffler { 594091d81d1SSam Leffler struct cryptocap *newdrv; 595091d81d1SSam Leffler int i; 596091d81d1SSam Leffler 5976810ad6fSSam Leffler if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) { 5986810ad6fSSam Leffler printf("%s: no flags specified when registering driver\n", 5996810ad6fSSam Leffler device_get_nameunit(dev)); 6006810ad6fSSam Leffler return -1; 6016810ad6fSSam Leffler } 6026810ad6fSSam Leffler 603091d81d1SSam Leffler CRYPTO_DRIVER_LOCK(); 604091d81d1SSam Leffler 6054acae0acSPawel Jakub Dawidek for (i = 0; i < crypto_drivers_num; i++) { 6066810ad6fSSam Leffler if (crypto_drivers[i].cc_dev == NULL && 6074acae0acSPawel Jakub Dawidek (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0) { 608091d81d1SSam Leffler break; 6094acae0acSPawel Jakub Dawidek } 6104acae0acSPawel Jakub Dawidek } 611091d81d1SSam Leffler 612091d81d1SSam Leffler /* Out of entries, allocate some more. */ 613091d81d1SSam Leffler if (i == crypto_drivers_num) { 614091d81d1SSam Leffler /* Be careful about wrap-around. */ 615091d81d1SSam Leffler if (2 * crypto_drivers_num <= crypto_drivers_num) { 616091d81d1SSam Leffler CRYPTO_DRIVER_UNLOCK(); 617091d81d1SSam Leffler printf("crypto: driver count wraparound!\n"); 618091d81d1SSam Leffler return -1; 619091d81d1SSam Leffler } 620091d81d1SSam Leffler 621091d81d1SSam Leffler newdrv = malloc(2 * crypto_drivers_num * 622091d81d1SSam Leffler sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO); 623091d81d1SSam Leffler if (newdrv == NULL) { 624091d81d1SSam Leffler CRYPTO_DRIVER_UNLOCK(); 625091d81d1SSam Leffler printf("crypto: no space to expand driver table!\n"); 626091d81d1SSam Leffler return -1; 627091d81d1SSam Leffler } 628091d81d1SSam Leffler 629091d81d1SSam Leffler bcopy(crypto_drivers, newdrv, 630091d81d1SSam Leffler crypto_drivers_num * sizeof(struct cryptocap)); 631091d81d1SSam Leffler 632091d81d1SSam Leffler crypto_drivers_num *= 2; 633091d81d1SSam Leffler 634091d81d1SSam Leffler free(crypto_drivers, M_CRYPTO_DATA); 635091d81d1SSam Leffler crypto_drivers = newdrv; 636091d81d1SSam Leffler } 637091d81d1SSam Leffler 638091d81d1SSam Leffler /* NB: state is zero'd on free */ 639091d81d1SSam Leffler crypto_drivers[i].cc_sessions = 1; /* Mark */ 6406810ad6fSSam Leffler crypto_drivers[i].cc_dev = dev; 641091d81d1SSam Leffler crypto_drivers[i].cc_flags = flags; 642091d81d1SSam Leffler if (bootverbose) 643d7d2f0d4SConrad Meyer printf("crypto: assign %s driver id %u, flags 0x%x\n", 6446810ad6fSSam Leffler device_get_nameunit(dev), i, flags); 645091d81d1SSam Leffler 646091d81d1SSam Leffler CRYPTO_DRIVER_UNLOCK(); 647091d81d1SSam Leffler 648091d81d1SSam Leffler return i; 649091d81d1SSam Leffler } 650091d81d1SSam Leffler 6516810ad6fSSam Leffler /* 6526810ad6fSSam Leffler * Lookup a driver by name. We match against the full device 6536810ad6fSSam Leffler * name and unit, and against just the name. The latter gives 6546810ad6fSSam Leffler * us a simple widlcarding by device name. On success return the 6556810ad6fSSam Leffler * driver/hardware identifier; otherwise return -1. 
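 * For example (illustrative device names), "aesni0" matches only that
 * unit while plain "aesni" matches the first such device found.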
6566810ad6fSSam Leffler */ 6576810ad6fSSam Leffler int 6586810ad6fSSam Leffler crypto_find_driver(const char *match) 659091d81d1SSam Leffler { 6606810ad6fSSam Leffler int i, len = strlen(match); 6616810ad6fSSam Leffler 6626810ad6fSSam Leffler CRYPTO_DRIVER_LOCK(); 6636810ad6fSSam Leffler for (i = 0; i < crypto_drivers_num; i++) { 6646810ad6fSSam Leffler device_t dev = crypto_drivers[i].cc_dev; 6656810ad6fSSam Leffler if (dev == NULL || 6666810ad6fSSam Leffler (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP)) 6676810ad6fSSam Leffler continue; 6686810ad6fSSam Leffler if (strncmp(match, device_get_nameunit(dev), len) == 0 || 6696810ad6fSSam Leffler strncmp(match, device_get_name(dev), len) == 0) 6706810ad6fSSam Leffler break; 6716810ad6fSSam Leffler } 6726810ad6fSSam Leffler CRYPTO_DRIVER_UNLOCK(); 6736810ad6fSSam Leffler return i < crypto_drivers_num ? i : -1; 6746810ad6fSSam Leffler } 6756810ad6fSSam Leffler 6766810ad6fSSam Leffler /* 6776810ad6fSSam Leffler * Return the device_t for the specified driver or NULL 6786810ad6fSSam Leffler * if the driver identifier is invalid. 6796810ad6fSSam Leffler */ 6806810ad6fSSam Leffler device_t 6816810ad6fSSam Leffler crypto_find_device_byhid(int hid) 6826810ad6fSSam Leffler { 6836810ad6fSSam Leffler struct cryptocap *cap = crypto_checkdriver(hid); 6846810ad6fSSam Leffler return cap != NULL ? cap->cc_dev : NULL; 6856810ad6fSSam Leffler } 6866810ad6fSSam Leffler 6876810ad6fSSam Leffler /* 6886810ad6fSSam Leffler * Return the device/driver capabilities. 6896810ad6fSSam Leffler */ 6906810ad6fSSam Leffler int 6916810ad6fSSam Leffler crypto_getcaps(int hid) 6926810ad6fSSam Leffler { 6936810ad6fSSam Leffler struct cryptocap *cap = crypto_checkdriver(hid); 6946810ad6fSSam Leffler return cap != NULL ? cap->cc_flags : 0; 695091d81d1SSam Leffler } 696091d81d1SSam Leffler 697091d81d1SSam Leffler /* 698091d81d1SSam Leffler * Register support for a key-related algorithm. This routine 699091d81d1SSam Leffler * is called once for each algorithm supported a driver. 700091d81d1SSam Leffler */ 701091d81d1SSam Leffler int 7026810ad6fSSam Leffler crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags) 703091d81d1SSam Leffler { 704091d81d1SSam Leffler struct cryptocap *cap; 705091d81d1SSam Leffler int err; 706091d81d1SSam Leffler 707091d81d1SSam Leffler CRYPTO_DRIVER_LOCK(); 708091d81d1SSam Leffler 709091d81d1SSam Leffler cap = crypto_checkdriver(driverid); 710091d81d1SSam Leffler if (cap != NULL && 711091d81d1SSam Leffler (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) { 712091d81d1SSam Leffler /* 713091d81d1SSam Leffler * XXX Do some performance testing to determine placing. 714091d81d1SSam Leffler * XXX We probably need an auxiliary data structure that 715091d81d1SSam Leffler * XXX describes relative performances. 
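 *
 * (Typical illustrative use: an asymmetric-capable driver calls
 * crypto_kregister(cid, CRK_MOD_EXP, 0) once for each key operation
 * it implements.)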
716091d81d1SSam Leffler */ 717091d81d1SSam Leffler 718091d81d1SSam Leffler cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED; 719091d81d1SSam Leffler if (bootverbose) 7206810ad6fSSam Leffler printf("crypto: %s registers key alg %u flags %u\n" 7216810ad6fSSam Leffler , device_get_nameunit(cap->cc_dev) 722091d81d1SSam Leffler , kalg 723091d81d1SSam Leffler , flags 724091d81d1SSam Leffler ); 725091d81d1SSam Leffler err = 0; 726091d81d1SSam Leffler } else 727091d81d1SSam Leffler err = EINVAL; 728091d81d1SSam Leffler 729091d81d1SSam Leffler CRYPTO_DRIVER_UNLOCK(); 730091d81d1SSam Leffler return err; 731091d81d1SSam Leffler } 732091d81d1SSam Leffler 733091d81d1SSam Leffler /* 734091d81d1SSam Leffler * Register support for a non-key-related algorithm. This routine 735091d81d1SSam Leffler * is called once for each such algorithm supported by a driver. 736091d81d1SSam Leffler */ 737091d81d1SSam Leffler int 738091d81d1SSam Leffler crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen, 7396810ad6fSSam Leffler u_int32_t flags) 740091d81d1SSam Leffler { 741091d81d1SSam Leffler struct cryptocap *cap; 742091d81d1SSam Leffler int err; 743091d81d1SSam Leffler 744091d81d1SSam Leffler CRYPTO_DRIVER_LOCK(); 745091d81d1SSam Leffler 746091d81d1SSam Leffler cap = crypto_checkdriver(driverid); 747091d81d1SSam Leffler /* NB: algorithms are in the range [1..max] */ 748091d81d1SSam Leffler if (cap != NULL && 749091d81d1SSam Leffler (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) { 750091d81d1SSam Leffler /* 751091d81d1SSam Leffler * XXX Do some performance testing to determine placing. 752091d81d1SSam Leffler * XXX We probably need an auxiliary data structure that 753091d81d1SSam Leffler * XXX describes relative performances. 754091d81d1SSam Leffler */ 755091d81d1SSam Leffler 756091d81d1SSam Leffler cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED; 757091d81d1SSam Leffler cap->cc_max_op_len[alg] = maxoplen; 758091d81d1SSam Leffler if (bootverbose) 7596810ad6fSSam Leffler printf("crypto: %s registers alg %u flags %u maxoplen %u\n" 7606810ad6fSSam Leffler , device_get_nameunit(cap->cc_dev) 761091d81d1SSam Leffler , alg 762091d81d1SSam Leffler , flags 763091d81d1SSam Leffler , maxoplen 764091d81d1SSam Leffler ); 765091d81d1SSam Leffler cap->cc_sessions = 0; /* Unmark */ 766091d81d1SSam Leffler err = 0; 767091d81d1SSam Leffler } else 768091d81d1SSam Leffler err = EINVAL; 769091d81d1SSam Leffler 770091d81d1SSam Leffler CRYPTO_DRIVER_UNLOCK(); 771091d81d1SSam Leffler return err; 772091d81d1SSam Leffler } 773091d81d1SSam Leffler 7746810ad6fSSam Leffler static void 7756810ad6fSSam Leffler driver_finis(struct cryptocap *cap) 7766810ad6fSSam Leffler { 7776810ad6fSSam Leffler u_int32_t ses, kops; 7786810ad6fSSam Leffler 7796810ad6fSSam Leffler CRYPTO_DRIVER_ASSERT(); 7806810ad6fSSam Leffler 7816810ad6fSSam Leffler ses = cap->cc_sessions; 7826810ad6fSSam Leffler kops = cap->cc_koperations; 7836810ad6fSSam Leffler bzero(cap, sizeof(*cap)); 7846810ad6fSSam Leffler if (ses != 0 || kops != 0) { 7856810ad6fSSam Leffler /* 7866810ad6fSSam Leffler * If there are pending sessions, 7876810ad6fSSam Leffler * just mark as invalid. 7886810ad6fSSam Leffler */ 7896810ad6fSSam Leffler cap->cc_flags |= CRYPTOCAP_F_CLEANUP; 7906810ad6fSSam Leffler cap->cc_sessions = ses; 7916810ad6fSSam Leffler cap->cc_koperations = kops; 7926810ad6fSSam Leffler } 7936810ad6fSSam Leffler } 7946810ad6fSSam Leffler 795091d81d1SSam Leffler /* 796091d81d1SSam Leffler * Unregister a crypto driver. 
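 * (This withdraws a single algorithm; crypto_unregister_all() below
 * removes every algorithm registered by the driver at once.)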
If there are pending sessions using it, 797091d81d1SSam Leffler * leave enough information around so that subsequent calls using those 798091d81d1SSam Leffler * sessions will correctly detect the driver has been unregistered and 799091d81d1SSam Leffler * reroute requests. 800091d81d1SSam Leffler */ 801091d81d1SSam Leffler int 802091d81d1SSam Leffler crypto_unregister(u_int32_t driverid, int alg) 803091d81d1SSam Leffler { 804091d81d1SSam Leffler struct cryptocap *cap; 8054acae0acSPawel Jakub Dawidek int i, err; 806091d81d1SSam Leffler 807091d81d1SSam Leffler CRYPTO_DRIVER_LOCK(); 808091d81d1SSam Leffler cap = crypto_checkdriver(driverid); 809091d81d1SSam Leffler if (cap != NULL && 810091d81d1SSam Leffler (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) && 811091d81d1SSam Leffler cap->cc_alg[alg] != 0) { 812091d81d1SSam Leffler cap->cc_alg[alg] = 0; 813091d81d1SSam Leffler cap->cc_max_op_len[alg] = 0; 814091d81d1SSam Leffler 815091d81d1SSam Leffler /* Was this the last algorithm ? */ 816091d81d1SSam Leffler for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++) 817091d81d1SSam Leffler if (cap->cc_alg[i] != 0) 818091d81d1SSam Leffler break; 819091d81d1SSam Leffler 8206810ad6fSSam Leffler if (i == CRYPTO_ALGORITHM_MAX + 1) 8216810ad6fSSam Leffler driver_finis(cap); 822091d81d1SSam Leffler err = 0; 823091d81d1SSam Leffler } else 824091d81d1SSam Leffler err = EINVAL; 825091d81d1SSam Leffler CRYPTO_DRIVER_UNLOCK(); 8266810ad6fSSam Leffler 827091d81d1SSam Leffler return err; 828091d81d1SSam Leffler } 829091d81d1SSam Leffler 830091d81d1SSam Leffler /* 831091d81d1SSam Leffler * Unregister all algorithms associated with a crypto driver. 832091d81d1SSam Leffler * If there are pending sessions using it, leave enough information 833091d81d1SSam Leffler * around so that subsequent calls using those sessions will 834091d81d1SSam Leffler * correctly detect the driver has been unregistered and reroute 835091d81d1SSam Leffler * requests. 836091d81d1SSam Leffler */ 837091d81d1SSam Leffler int 838091d81d1SSam Leffler crypto_unregister_all(u_int32_t driverid) 839091d81d1SSam Leffler { 840091d81d1SSam Leffler struct cryptocap *cap; 8416810ad6fSSam Leffler int err; 842091d81d1SSam Leffler 843091d81d1SSam Leffler CRYPTO_DRIVER_LOCK(); 844091d81d1SSam Leffler cap = crypto_checkdriver(driverid); 845091d81d1SSam Leffler if (cap != NULL) { 8466810ad6fSSam Leffler driver_finis(cap); 847091d81d1SSam Leffler err = 0; 848091d81d1SSam Leffler } else 849091d81d1SSam Leffler err = EINVAL; 850091d81d1SSam Leffler CRYPTO_DRIVER_UNLOCK(); 8516810ad6fSSam Leffler 852091d81d1SSam Leffler return err; 853091d81d1SSam Leffler } 854091d81d1SSam Leffler 855091d81d1SSam Leffler /* 856091d81d1SSam Leffler * Clear blockage on a driver. The what parameter indicates whether 857091d81d1SSam Leffler * the driver is now ready for cryptop's and/or cryptokop's. 
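 * A driver whose process method returned ERESTART typically calls,
 * for example, crypto_unblock(sc->sc_cid, CRYPTO_SYMQ | CRYPTO_ASYMQ)
 * once it can accept work again ("sc_cid" is an illustrative name).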
858091d81d1SSam Leffler */ 859091d81d1SSam Leffler int 860091d81d1SSam Leffler crypto_unblock(u_int32_t driverid, int what) 861091d81d1SSam Leffler { 862091d81d1SSam Leffler struct cryptocap *cap; 8633a865c82SPawel Jakub Dawidek int err; 864091d81d1SSam Leffler 865091d81d1SSam Leffler CRYPTO_Q_LOCK(); 866091d81d1SSam Leffler cap = crypto_checkdriver(driverid); 867091d81d1SSam Leffler if (cap != NULL) { 8683a865c82SPawel Jakub Dawidek if (what & CRYPTO_SYMQ) 869091d81d1SSam Leffler cap->cc_qblocked = 0; 8703a865c82SPawel Jakub Dawidek if (what & CRYPTO_ASYMQ) 871091d81d1SSam Leffler cap->cc_kqblocked = 0; 8723a865c82SPawel Jakub Dawidek if (crp_sleep) 8731a91ccccSSam Leffler wakeup_one(&crp_q); 874091d81d1SSam Leffler err = 0; 875091d81d1SSam Leffler } else 876091d81d1SSam Leffler err = EINVAL; 877091d81d1SSam Leffler CRYPTO_Q_UNLOCK(); 878091d81d1SSam Leffler 879091d81d1SSam Leffler return err; 880091d81d1SSam Leffler } 881091d81d1SSam Leffler 882091d81d1SSam Leffler /* 883091d81d1SSam Leffler * Add a crypto request to a queue, to be processed by the kernel thread. 884091d81d1SSam Leffler */ 885091d81d1SSam Leffler int 886091d81d1SSam Leffler crypto_dispatch(struct cryptop *crp) 887091d81d1SSam Leffler { 8884acae0acSPawel Jakub Dawidek struct cryptocap *cap; 8894acae0acSPawel Jakub Dawidek u_int32_t hid; 8904acae0acSPawel Jakub Dawidek int result; 891091d81d1SSam Leffler 8927d1853eeSSam Leffler cryptostats.cs_ops++; 8937d1853eeSSam Leffler 8947d1853eeSSam Leffler #ifdef CRYPTO_TIMING 8957d1853eeSSam Leffler if (crypto_timing) 8967d1853eeSSam Leffler binuptime(&crp->crp_tstamp); 8977d1853eeSSam Leffler #endif 8987d1853eeSSam Leffler 899*de2b2c90SFabien Thomas crp->crp_retw_id = crp->crp_sid % crypto_workers_num; 900*de2b2c90SFabien Thomas 90139bbca6fSFabien Thomas if (CRYPTOP_ASYNC(crp)) { 90239bbca6fSFabien Thomas if (crp->crp_flags & CRYPTO_F_ASYNC_KEEPORDER) { 90339bbca6fSFabien Thomas struct crypto_ret_worker *ret_worker; 90439bbca6fSFabien Thomas 90539bbca6fSFabien Thomas ret_worker = CRYPTO_RETW(crp->crp_retw_id); 90639bbca6fSFabien Thomas 90739bbca6fSFabien Thomas CRYPTO_RETW_LOCK(ret_worker); 90839bbca6fSFabien Thomas crp->crp_seq = ret_worker->reorder_ops++; 90939bbca6fSFabien Thomas CRYPTO_RETW_UNLOCK(ret_worker); 91039bbca6fSFabien Thomas } 91139bbca6fSFabien Thomas 91239bbca6fSFabien Thomas TASK_INIT(&crp->crp_task, 0, crypto_task_invoke, crp); 91339bbca6fSFabien Thomas taskqueue_enqueue(crypto_tq, &crp->crp_task); 91439bbca6fSFabien Thomas return (0); 91539bbca6fSFabien Thomas } 9164acae0acSPawel Jakub Dawidek 917eb73a605SSam Leffler if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) { 91839bbca6fSFabien Thomas hid = CRYPTO_SESID2HID(crp->crp_sid); 91939bbca6fSFabien Thomas 920eb73a605SSam Leffler /* 921eb73a605SSam Leffler * Caller marked the request to be processed 922eb73a605SSam Leffler * immediately; dispatch it directly to the 923eb73a605SSam Leffler * driver unless the driver is currently blocked. 924eb73a605SSam Leffler */ 925f7890744SSam Leffler cap = crypto_checkdriver(hid); 9264acae0acSPawel Jakub Dawidek /* Driver cannot disappeared when there is an active session. 
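 * (An active session keeps cc_sessions non-zero, so unregistration
 * only marks the slot CRYPTOCAP_F_CLEANUP instead of clearing it.)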
*/ 9274acae0acSPawel Jakub Dawidek KASSERT(cap != NULL, ("%s: Driver disappeared.", __func__)); 9284acae0acSPawel Jakub Dawidek if (!cap->cc_qblocked) { 9294acae0acSPawel Jakub Dawidek result = crypto_invoke(cap, crp, 0); 9304acae0acSPawel Jakub Dawidek if (result != ERESTART) 9314acae0acSPawel Jakub Dawidek return (result); 932091d81d1SSam Leffler /* 933bda0abc6SPawel Jakub Dawidek * The driver ran out of resources, put the request on 934bda0abc6SPawel Jakub Dawidek * the queue. 935091d81d1SSam Leffler */ 936f7890744SSam Leffler } 937eb73a605SSam Leffler } 93839bbca6fSFabien Thomas crypto_batch_enqueue(crp); 93939bbca6fSFabien Thomas return 0; 94039bbca6fSFabien Thomas } 94139bbca6fSFabien Thomas 94239bbca6fSFabien Thomas void 94339bbca6fSFabien Thomas crypto_batch_enqueue(struct cryptop *crp) 94439bbca6fSFabien Thomas { 94539bbca6fSFabien Thomas 9464acae0acSPawel Jakub Dawidek CRYPTO_Q_LOCK(); 9474acae0acSPawel Jakub Dawidek TAILQ_INSERT_TAIL(&crp_q, crp, crp_next); 9483a865c82SPawel Jakub Dawidek if (crp_sleep) 9493a865c82SPawel Jakub Dawidek wakeup_one(&crp_q); 9503569ae7fSSam Leffler CRYPTO_Q_UNLOCK(); 951091d81d1SSam Leffler } 952091d81d1SSam Leffler 953091d81d1SSam Leffler /* 954091d81d1SSam Leffler * Add an asymetric crypto request to a queue, 955091d81d1SSam Leffler * to be processed by the kernel thread. 956091d81d1SSam Leffler */ 957091d81d1SSam Leffler int 958091d81d1SSam Leffler crypto_kdispatch(struct cryptkop *krp) 959091d81d1SSam Leffler { 9606810ad6fSSam Leffler int error; 961091d81d1SSam Leffler 9627d1853eeSSam Leffler cryptostats.cs_kops++; 9637d1853eeSSam Leffler 9646810ad6fSSam Leffler error = crypto_kinvoke(krp, krp->krp_crid); 9656810ad6fSSam Leffler if (error == ERESTART) { 966091d81d1SSam Leffler CRYPTO_Q_LOCK(); 9674acae0acSPawel Jakub Dawidek TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next); 9683a865c82SPawel Jakub Dawidek if (crp_sleep) 9693a865c82SPawel Jakub Dawidek wakeup_one(&crp_q); 970091d81d1SSam Leffler CRYPTO_Q_UNLOCK(); 9716810ad6fSSam Leffler error = 0; 9726810ad6fSSam Leffler } 9736810ad6fSSam Leffler return error; 974091d81d1SSam Leffler } 975091d81d1SSam Leffler 976091d81d1SSam Leffler /* 9776810ad6fSSam Leffler * Verify a driver is suitable for the specified operation. 9786810ad6fSSam Leffler */ 9796810ad6fSSam Leffler static __inline int 9806810ad6fSSam Leffler kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp) 9816810ad6fSSam Leffler { 9826810ad6fSSam Leffler return (cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0; 9836810ad6fSSam Leffler } 9846810ad6fSSam Leffler 9856810ad6fSSam Leffler /* 9866810ad6fSSam Leffler * Select a driver for an asym operation. The driver must 9876810ad6fSSam Leffler * support the necessary algorithm. The caller can constrain 9886810ad6fSSam Leffler * which device is selected with the flags parameter. The 9896810ad6fSSam Leffler * algorithm we use here is pretty stupid; just use the first 9906810ad6fSSam Leffler * driver that supports the algorithms we need. If there are 9916810ad6fSSam Leffler * multiple suitable drivers we choose the driver with the 9926810ad6fSSam Leffler * fewest active operations. We prefer hardware-backed 9936810ad6fSSam Leffler * drivers to software ones when either may be used. 
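 * (This mirrors crypto_select_driver() above, except that ties are
 * broken on cc_koperations rather than cc_sessions.)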
9946810ad6fSSam Leffler */ 9956810ad6fSSam Leffler static struct cryptocap * 9966810ad6fSSam Leffler crypto_select_kdriver(const struct cryptkop *krp, int flags) 9976810ad6fSSam Leffler { 998151ba793SAlexander Kabaev struct cryptocap *cap, *best; 9996810ad6fSSam Leffler int match, hid; 10006810ad6fSSam Leffler 10016810ad6fSSam Leffler CRYPTO_DRIVER_ASSERT(); 10026810ad6fSSam Leffler 10036810ad6fSSam Leffler /* 10046810ad6fSSam Leffler * Look first for hardware crypto devices if permitted. 10056810ad6fSSam Leffler */ 10066810ad6fSSam Leffler if (flags & CRYPTOCAP_F_HARDWARE) 10076810ad6fSSam Leffler match = CRYPTOCAP_F_HARDWARE; 10086810ad6fSSam Leffler else 10096810ad6fSSam Leffler match = CRYPTOCAP_F_SOFTWARE; 10106810ad6fSSam Leffler best = NULL; 10116810ad6fSSam Leffler again: 10126810ad6fSSam Leffler for (hid = 0; hid < crypto_drivers_num; hid++) { 10136810ad6fSSam Leffler cap = &crypto_drivers[hid]; 10146810ad6fSSam Leffler /* 10156810ad6fSSam Leffler * If it's not initialized, is in the process of 10166810ad6fSSam Leffler * going away, or is not appropriate (hardware 10176810ad6fSSam Leffler * or software based on match), then skip. 10186810ad6fSSam Leffler */ 10196810ad6fSSam Leffler if (cap->cc_dev == NULL || 10206810ad6fSSam Leffler (cap->cc_flags & CRYPTOCAP_F_CLEANUP) || 10216810ad6fSSam Leffler (cap->cc_flags & match) == 0) 10226810ad6fSSam Leffler continue; 10236810ad6fSSam Leffler 10246810ad6fSSam Leffler /* verify all the algorithms are supported. */ 10256810ad6fSSam Leffler if (kdriver_suitable(cap, krp)) { 10266810ad6fSSam Leffler if (best == NULL || 10276810ad6fSSam Leffler cap->cc_koperations < best->cc_koperations) 10286810ad6fSSam Leffler best = cap; 10296810ad6fSSam Leffler } 10306810ad6fSSam Leffler } 10316810ad6fSSam Leffler if (best != NULL) 10326810ad6fSSam Leffler return best; 10336810ad6fSSam Leffler if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) { 10346810ad6fSSam Leffler /* sort of an Algol 68-style for loop */ 10356810ad6fSSam Leffler match = CRYPTOCAP_F_SOFTWARE; 10366810ad6fSSam Leffler goto again; 10376810ad6fSSam Leffler } 10386810ad6fSSam Leffler return best; 10396810ad6fSSam Leffler } 10406810ad6fSSam Leffler 10416810ad6fSSam Leffler /* 10421762773dSPedro F. Giffuni * Dispatch an asymmetric crypto request. 1043091d81d1SSam Leffler */ 1044091d81d1SSam Leffler static int 10456810ad6fSSam Leffler crypto_kinvoke(struct cryptkop *krp, int crid) 1046091d81d1SSam Leffler { 10474acae0acSPawel Jakub Dawidek struct cryptocap *cap = NULL; 10486810ad6fSSam Leffler int error; 1049091d81d1SSam Leffler 10504acae0acSPawel Jakub Dawidek KASSERT(krp != NULL, ("%s: krp == NULL", __func__)); 10514acae0acSPawel Jakub Dawidek KASSERT(krp->krp_callback != NULL, 10524acae0acSPawel Jakub Dawidek ("%s: krp->crp_callback == NULL", __func__)); 1053091d81d1SSam Leffler 10544acae0acSPawel Jakub Dawidek CRYPTO_DRIVER_LOCK(); 10556810ad6fSSam Leffler if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) { 10566810ad6fSSam Leffler cap = crypto_checkdriver(crid); 10576810ad6fSSam Leffler if (cap != NULL) { 10586810ad6fSSam Leffler /* 10596810ad6fSSam Leffler * Driver present, it must support the necessary 10606810ad6fSSam Leffler * algorithm and, if s/w drivers are excluded, 10616810ad6fSSam Leffler * it must be registered as hardware-backed. 
10626810ad6fSSam Leffler */ 10636810ad6fSSam Leffler if (!kdriver_suitable(cap, krp) || 10646810ad6fSSam Leffler (!crypto_devallowsoft && 10656810ad6fSSam Leffler (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0)) 10666810ad6fSSam Leffler cap = NULL; 10674acae0acSPawel Jakub Dawidek } 10686810ad6fSSam Leffler } else { 10696810ad6fSSam Leffler /* 10706810ad6fSSam Leffler * No requested driver; select based on crid flags. 10716810ad6fSSam Leffler */ 10726810ad6fSSam Leffler if (!crypto_devallowsoft) /* NB: disallow s/w drivers */ 10736810ad6fSSam Leffler crid &= ~CRYPTOCAP_F_SOFTWARE; 10746810ad6fSSam Leffler cap = crypto_select_kdriver(krp, crid); 10754acae0acSPawel Jakub Dawidek } 10766810ad6fSSam Leffler if (cap != NULL && !cap->cc_kqblocked) { 10776810ad6fSSam Leffler krp->krp_hid = cap - crypto_drivers; 10784acae0acSPawel Jakub Dawidek cap->cc_koperations++; 10794acae0acSPawel Jakub Dawidek CRYPTO_DRIVER_UNLOCK(); 10806810ad6fSSam Leffler error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0); 10814acae0acSPawel Jakub Dawidek CRYPTO_DRIVER_LOCK(); 10824acae0acSPawel Jakub Dawidek if (error == ERESTART) { 10834acae0acSPawel Jakub Dawidek cap->cc_koperations--; 10844acae0acSPawel Jakub Dawidek CRYPTO_DRIVER_UNLOCK(); 10854acae0acSPawel Jakub Dawidek return (error); 10864acae0acSPawel Jakub Dawidek } 10874acae0acSPawel Jakub Dawidek } else { 10886810ad6fSSam Leffler /* 10896810ad6fSSam Leffler * NB: cap is !NULL if device is blocked; in 10906810ad6fSSam Leffler * that case return ERESTART so the operation 10916810ad6fSSam Leffler * is resubmitted if possible. 10926810ad6fSSam Leffler */ 10936810ad6fSSam Leffler error = (cap == NULL) ? ENODEV : ERESTART; 10944acae0acSPawel Jakub Dawidek } 10954acae0acSPawel Jakub Dawidek CRYPTO_DRIVER_UNLOCK(); 1096091d81d1SSam Leffler 1097091d81d1SSam Leffler if (error) { 1098091d81d1SSam Leffler krp->krp_status = error; 10991a91ccccSSam Leffler crypto_kdone(krp); 1100091d81d1SSam Leffler } 1101091d81d1SSam Leffler return 0; 1102091d81d1SSam Leffler } 1103091d81d1SSam Leffler 11047d1853eeSSam Leffler #ifdef CRYPTO_TIMING 11057d1853eeSSam Leffler static void 11067d1853eeSSam Leffler crypto_tstat(struct cryptotstat *ts, struct bintime *bt) 11077d1853eeSSam Leffler { 11087d1853eeSSam Leffler struct bintime now, delta; 11097d1853eeSSam Leffler struct timespec t; 11107d1853eeSSam Leffler uint64_t u; 11117d1853eeSSam Leffler 11127d1853eeSSam Leffler binuptime(&now); 11137d1853eeSSam Leffler u = now.frac; 11147d1853eeSSam Leffler delta.frac = now.frac - bt->frac; 11157d1853eeSSam Leffler delta.sec = now.sec - bt->sec; 11167d1853eeSSam Leffler if (u < delta.frac) 11177d1853eeSSam Leffler delta.sec--; 11187d1853eeSSam Leffler bintime2timespec(&delta, &t); 11197d1853eeSSam Leffler timespecadd(&ts->acc, &t); 11207d1853eeSSam Leffler if (timespeccmp(&t, &ts->min, <)) 11217d1853eeSSam Leffler ts->min = t; 11227d1853eeSSam Leffler if (timespeccmp(&t, &ts->max, >)) 11237d1853eeSSam Leffler ts->max = t; 11247d1853eeSSam Leffler ts->count++; 11257d1853eeSSam Leffler 11267d1853eeSSam Leffler *bt = now; 11277d1853eeSSam Leffler } 11287d1853eeSSam Leffler #endif 11297d1853eeSSam Leffler 113039bbca6fSFabien Thomas static void 113139bbca6fSFabien Thomas crypto_task_invoke(void *ctx, int pending) 113239bbca6fSFabien Thomas { 113339bbca6fSFabien Thomas struct cryptocap *cap; 113439bbca6fSFabien Thomas struct cryptop *crp; 113539bbca6fSFabien Thomas int hid, result; 113639bbca6fSFabien Thomas 113739bbca6fSFabien Thomas crp = (struct cryptop *)ctx; 113839bbca6fSFabien Thomas 
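	/*
	 * Requests flagged CRYPTO_F_ASYNC are handed to the crypto
	 * taskqueue and end up here; look up the driver for the
	 * session and invoke it, re-queueing the request on the
	 * batch path if the driver reports ERESTART.
	 */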
113939bbca6fSFabien Thomas hid = CRYPTO_SESID2HID(crp->crp_sid); 114039bbca6fSFabien Thomas cap = crypto_checkdriver(hid); 114139bbca6fSFabien Thomas 114239bbca6fSFabien Thomas result = crypto_invoke(cap, crp, 0); 114339bbca6fSFabien Thomas if (result == ERESTART) 114439bbca6fSFabien Thomas crypto_batch_enqueue(crp); 114539bbca6fSFabien Thomas } 114639bbca6fSFabien Thomas 1147091d81d1SSam Leffler /* 1148091d81d1SSam Leffler * Dispatch a crypto request to the appropriate crypto devices. 1149091d81d1SSam Leffler */ 1150091d81d1SSam Leffler static int 11514acae0acSPawel Jakub Dawidek crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint) 1152091d81d1SSam Leffler { 11534acae0acSPawel Jakub Dawidek 11544acae0acSPawel Jakub Dawidek KASSERT(crp != NULL, ("%s: crp == NULL", __func__)); 11554acae0acSPawel Jakub Dawidek KASSERT(crp->crp_callback != NULL, 11564acae0acSPawel Jakub Dawidek ("%s: crp->crp_callback == NULL", __func__)); 11574acae0acSPawel Jakub Dawidek KASSERT(crp->crp_desc != NULL, ("%s: crp->crp_desc == NULL", __func__)); 1158091d81d1SSam Leffler 11597d1853eeSSam Leffler #ifdef CRYPTO_TIMING 11607d1853eeSSam Leffler if (crypto_timing) 11617d1853eeSSam Leffler crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp); 11627d1853eeSSam Leffler #endif 11634acae0acSPawel Jakub Dawidek if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) { 1164091d81d1SSam Leffler struct cryptodesc *crd; 1165091d81d1SSam Leffler u_int64_t nid; 1166091d81d1SSam Leffler 1167091d81d1SSam Leffler /* 1168091d81d1SSam Leffler * Driver has unregistered; migrate the session and return 1169091d81d1SSam Leffler * an error to the caller so they'll resubmit the op. 11704acae0acSPawel Jakub Dawidek * 11714acae0acSPawel Jakub Dawidek * XXX: What if there are more already queued requests for this 11724acae0acSPawel Jakub Dawidek * session? 1173091d81d1SSam Leffler */ 11744acae0acSPawel Jakub Dawidek crypto_freesession(crp->crp_sid); 11754acae0acSPawel Jakub Dawidek 1176091d81d1SSam Leffler for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next) 1177091d81d1SSam Leffler crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI); 1178091d81d1SSam Leffler 11796810ad6fSSam Leffler /* XXX propagate flags from initial session? */ 11806810ad6fSSam Leffler if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI), 11816810ad6fSSam Leffler CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0) 1182091d81d1SSam Leffler crp->crp_sid = nid; 1183091d81d1SSam Leffler 1184091d81d1SSam Leffler crp->crp_etype = EAGAIN; 11851a91ccccSSam Leffler crypto_done(crp); 1186091d81d1SSam Leffler return 0; 1187091d81d1SSam Leffler } else { 1188091d81d1SSam Leffler /* 1189091d81d1SSam Leffler * Invoke the driver to process the request. 1190091d81d1SSam Leffler */ 11916810ad6fSSam Leffler return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint); 1192091d81d1SSam Leffler } 1193091d81d1SSam Leffler } 1194091d81d1SSam Leffler 1195091d81d1SSam Leffler /* 1196091d81d1SSam Leffler * Release a set of crypto descriptors. 
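 *
 * Together with crypto_getreq() below this bounds the life cycle of a
 * request.  A rough consumer-side sketch (illustrative only; the
 * crp_ilen/crp_buf and crd_* field names come from cryptodev.h rather
 * than this file, "sid" is a session from crypto_newsession(), and
 * error handling is omitted):
 *
 *	crp = crypto_getreq(1);
 *	crp->crp_sid = sid;
 *	crp->crp_ilen = len;
 *	crp->crp_buf = (caddr_t)buf;
 *	crp->crp_flags = CRYPTO_F_CBIFSYNC;
 *	crp->crp_callback = my_done;
 *	crd = crp->crp_desc;
 *	crd->crd_alg = CRYPTO_AES_CBC;
 *	crd->crd_skip = 0;
 *	crd->crd_len = len;
 *	error = crypto_dispatch(crp);
 *
 * and my_done() eventually releases the request with crypto_freereq().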
1197091d81d1SSam Leffler */ 1198091d81d1SSam Leffler void 1199091d81d1SSam Leffler crypto_freereq(struct cryptop *crp) 1200091d81d1SSam Leffler { 1201091d81d1SSam Leffler struct cryptodesc *crd; 1202091d81d1SSam Leffler 1203091d81d1SSam Leffler if (crp == NULL) 1204091d81d1SSam Leffler return; 1205091d81d1SSam Leffler 12060d5c337bSPawel Jakub Dawidek #ifdef DIAGNOSTIC 12070d5c337bSPawel Jakub Dawidek { 12080d5c337bSPawel Jakub Dawidek struct cryptop *crp2; 120939bbca6fSFabien Thomas struct crypto_ret_worker *ret_worker; 12100d5c337bSPawel Jakub Dawidek 12110d5c337bSPawel Jakub Dawidek CRYPTO_Q_LOCK(); 12120d5c337bSPawel Jakub Dawidek TAILQ_FOREACH(crp2, &crp_q, crp_next) { 12130d5c337bSPawel Jakub Dawidek KASSERT(crp2 != crp, 12140d5c337bSPawel Jakub Dawidek ("Freeing cryptop from the crypto queue (%p).", 12150d5c337bSPawel Jakub Dawidek crp)); 12160d5c337bSPawel Jakub Dawidek } 12170d5c337bSPawel Jakub Dawidek CRYPTO_Q_UNLOCK(); 121839bbca6fSFabien Thomas 121939bbca6fSFabien Thomas FOREACH_CRYPTO_RETW(ret_worker) { 122039bbca6fSFabien Thomas CRYPTO_RETW_LOCK(ret_worker); 122139bbca6fSFabien Thomas TAILQ_FOREACH(crp2, &ret_worker->crp_ret_q, crp_next) { 12220d5c337bSPawel Jakub Dawidek KASSERT(crp2 != crp, 12230d5c337bSPawel Jakub Dawidek ("Freeing cryptop from the return queue (%p).", 12240d5c337bSPawel Jakub Dawidek crp)); 12250d5c337bSPawel Jakub Dawidek } 122639bbca6fSFabien Thomas CRYPTO_RETW_UNLOCK(ret_worker); 122739bbca6fSFabien Thomas } 12280d5c337bSPawel Jakub Dawidek } 12290d5c337bSPawel Jakub Dawidek #endif 12300d5c337bSPawel Jakub Dawidek 1231091d81d1SSam Leffler while ((crd = crp->crp_desc) != NULL) { 1232091d81d1SSam Leffler crp->crp_desc = crd->crd_next; 1233091d81d1SSam Leffler uma_zfree(cryptodesc_zone, crd); 1234091d81d1SSam Leffler } 1235091d81d1SSam Leffler uma_zfree(cryptop_zone, crp); 1236091d81d1SSam Leffler } 1237091d81d1SSam Leffler 1238091d81d1SSam Leffler /* 1239091d81d1SSam Leffler * Acquire a set of crypto descriptors. 1240091d81d1SSam Leffler */ 1241091d81d1SSam Leffler struct cryptop * 1242091d81d1SSam Leffler crypto_getreq(int num) 1243091d81d1SSam Leffler { 1244091d81d1SSam Leffler struct cryptodesc *crd; 1245091d81d1SSam Leffler struct cryptop *crp; 1246091d81d1SSam Leffler 1247bc0c6d3cSSam Leffler crp = uma_zalloc(cryptop_zone, M_NOWAIT|M_ZERO); 1248091d81d1SSam Leffler if (crp != NULL) { 1249091d81d1SSam Leffler while (num--) { 1250bc0c6d3cSSam Leffler crd = uma_zalloc(cryptodesc_zone, M_NOWAIT|M_ZERO); 1251091d81d1SSam Leffler if (crd == NULL) { 1252091d81d1SSam Leffler crypto_freereq(crp); 1253091d81d1SSam Leffler return NULL; 1254091d81d1SSam Leffler } 1255091d81d1SSam Leffler 1256091d81d1SSam Leffler crd->crd_next = crp->crp_desc; 1257091d81d1SSam Leffler crp->crp_desc = crd; 1258091d81d1SSam Leffler } 1259091d81d1SSam Leffler } 1260091d81d1SSam Leffler return crp; 1261091d81d1SSam Leffler } 1262091d81d1SSam Leffler 1263091d81d1SSam Leffler /* 1264091d81d1SSam Leffler * Invoke the callback on behalf of the driver. 
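 *
 * Drivers call this exactly once per request, after storing the
 * completion status in crp_etype.  Depending on CRYPTO_F_CBIMM and
 * CRYPTO_F_CBIFSYNC the callback either runs directly here or is
 * queued to a return worker thread.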
1265091d81d1SSam Leffler */ 1266091d81d1SSam Leffler void 1267091d81d1SSam Leffler crypto_done(struct cryptop *crp) 1268091d81d1SSam Leffler { 12693569ae7fSSam Leffler KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0, 12703569ae7fSSam Leffler ("crypto_done: op already done, flags 0x%x", crp->crp_flags)); 12713569ae7fSSam Leffler crp->crp_flags |= CRYPTO_F_DONE; 12727d1853eeSSam Leffler if (crp->crp_etype != 0) 12737d1853eeSSam Leffler cryptostats.cs_errs++; 12747d1853eeSSam Leffler #ifdef CRYPTO_TIMING 12757d1853eeSSam Leffler if (crypto_timing) 12767d1853eeSSam Leffler crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp); 12777d1853eeSSam Leffler #endif 1278d8409aafSSam Leffler /* 1279d8409aafSSam Leffler * CBIMM means unconditionally do the callback immediately; 1280d8409aafSSam Leffler * CBIFSYNC means do the callback immediately only if the 1281d8409aafSSam Leffler * operation was done synchronously. Both are used to avoid 1282d8409aafSSam Leffler * doing extraneous context switches; the latter is mostly 1283d8409aafSSam Leffler * used with the software crypto driver. 1284d8409aafSSam Leffler */ 128539bbca6fSFabien Thomas if (!CRYPTOP_ASYNC_KEEPORDER(crp) && 128639bbca6fSFabien Thomas ((crp->crp_flags & CRYPTO_F_CBIMM) || 1287d8409aafSSam Leffler ((crp->crp_flags & CRYPTO_F_CBIFSYNC) && 128839bbca6fSFabien Thomas (CRYPTO_SESID2CAPS(crp->crp_sid) & CRYPTOCAP_F_SYNC)))) { 1289eb73a605SSam Leffler /* 1290eb73a605SSam Leffler * Do the callback directly. This is ok when the 1291eb73a605SSam Leffler * callback routine does very little (e.g. the 1292eb73a605SSam Leffler * /dev/crypto callback method just does a wakeup). 1293eb73a605SSam Leffler */ 1294eb73a605SSam Leffler #ifdef CRYPTO_TIMING 1295eb73a605SSam Leffler if (crypto_timing) { 1296eb73a605SSam Leffler /* 1297eb73a605SSam Leffler * NB: We must copy the timestamp before 1298eb73a605SSam Leffler * doing the callback as the cryptop is 1299eb73a605SSam Leffler * likely to be reclaimed. 1300eb73a605SSam Leffler */ 1301eb73a605SSam Leffler struct bintime t = crp->crp_tstamp; 1302eb73a605SSam Leffler crypto_tstat(&cryptostats.cs_cb, &t); 1303eb73a605SSam Leffler crp->crp_callback(crp); 1304eb73a605SSam Leffler crypto_tstat(&cryptostats.cs_finis, &t); 1305eb73a605SSam Leffler } else 1306eb73a605SSam Leffler #endif 1307eb73a605SSam Leffler crp->crp_callback(crp); 1308eb73a605SSam Leffler } else { 130939bbca6fSFabien Thomas struct crypto_ret_worker *ret_worker; 131039bbca6fSFabien Thomas bool wake; 131139bbca6fSFabien Thomas 131239bbca6fSFabien Thomas ret_worker = CRYPTO_RETW(crp->crp_retw_id); 131339bbca6fSFabien Thomas wake = false; 131439bbca6fSFabien Thomas 1315eb73a605SSam Leffler /* 1316eb73a605SSam Leffler * Normal case; queue the callback for the thread. 
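 *
 * Requests that must complete in submission order are inserted
 * into crp_ordered_ret_q by crp_seq; all others are appended to
 * crp_ret_q.  The worker is woken only when its queues were empty
 * or when the next in-order sequence number has just arrived.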
1317eb73a605SSam Leffler */ 131839bbca6fSFabien Thomas CRYPTO_RETW_LOCK(ret_worker); 131939bbca6fSFabien Thomas if (CRYPTOP_ASYNC_KEEPORDER(crp)) { 132039bbca6fSFabien Thomas struct cryptop *tmp; 132139bbca6fSFabien Thomas 132239bbca6fSFabien Thomas TAILQ_FOREACH_REVERSE(tmp, &ret_worker->crp_ordered_ret_q, 132339bbca6fSFabien Thomas cryptop_q, crp_next) { 132439bbca6fSFabien Thomas if (CRYPTO_SEQ_GT(crp->crp_seq, tmp->crp_seq)) { 132539bbca6fSFabien Thomas TAILQ_INSERT_AFTER(&ret_worker->crp_ordered_ret_q, 132639bbca6fSFabien Thomas tmp, crp, crp_next); 132739bbca6fSFabien Thomas break; 132839bbca6fSFabien Thomas } 132939bbca6fSFabien Thomas } 133039bbca6fSFabien Thomas if (tmp == NULL) { 133139bbca6fSFabien Thomas TAILQ_INSERT_HEAD(&ret_worker->crp_ordered_ret_q, 133239bbca6fSFabien Thomas crp, crp_next); 133339bbca6fSFabien Thomas } 133439bbca6fSFabien Thomas 133539bbca6fSFabien Thomas if (crp->crp_seq == ret_worker->reorder_cur_seq) 133639bbca6fSFabien Thomas wake = true; 133739bbca6fSFabien Thomas } 133839bbca6fSFabien Thomas else { 133939bbca6fSFabien Thomas if (CRYPTO_RETW_EMPTY(ret_worker)) 134039bbca6fSFabien Thomas wake = true; 134139bbca6fSFabien Thomas 134239bbca6fSFabien Thomas TAILQ_INSERT_TAIL(&ret_worker->crp_ret_q, crp, crp_next); 134339bbca6fSFabien Thomas } 134439bbca6fSFabien Thomas 134539bbca6fSFabien Thomas if (wake) 134639bbca6fSFabien Thomas wakeup_one(&ret_worker->crp_ret_q); /* shared wait channel */ 134739bbca6fSFabien Thomas CRYPTO_RETW_UNLOCK(ret_worker); 1348091d81d1SSam Leffler } 1349eb73a605SSam Leffler } 1350091d81d1SSam Leffler 1351091d81d1SSam Leffler /* 1352091d81d1SSam Leffler * Invoke the callback on behalf of the driver. 1353091d81d1SSam Leffler */ 1354091d81d1SSam Leffler void 1355091d81d1SSam Leffler crypto_kdone(struct cryptkop *krp) 1356091d81d1SSam Leffler { 135739bbca6fSFabien Thomas struct crypto_ret_worker *ret_worker; 13584acae0acSPawel Jakub Dawidek struct cryptocap *cap; 1359091d81d1SSam Leffler 13607d1853eeSSam Leffler if (krp->krp_status != 0) 13617d1853eeSSam Leffler cryptostats.cs_kerrs++; 13624acae0acSPawel Jakub Dawidek CRYPTO_DRIVER_LOCK(); 13634acae0acSPawel Jakub Dawidek /* XXX: What if driver is loaded in the meantime? 
*/ 13644acae0acSPawel Jakub Dawidek if (krp->krp_hid < crypto_drivers_num) { 13654acae0acSPawel Jakub Dawidek cap = &crypto_drivers[krp->krp_hid]; 1366fb17b4c5SJohn-Mark Gurney KASSERT(cap->cc_koperations > 0, ("cc_koperations == 0")); 13674acae0acSPawel Jakub Dawidek cap->cc_koperations--; 13684acae0acSPawel Jakub Dawidek if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) 13694acae0acSPawel Jakub Dawidek crypto_remove(cap); 13704acae0acSPawel Jakub Dawidek } 13714acae0acSPawel Jakub Dawidek CRYPTO_DRIVER_UNLOCK(); 137239bbca6fSFabien Thomas 137339bbca6fSFabien Thomas ret_worker = CRYPTO_RETW(0); 137439bbca6fSFabien Thomas 137539bbca6fSFabien Thomas CRYPTO_RETW_LOCK(ret_worker); 137639bbca6fSFabien Thomas if (CRYPTO_RETW_EMPTY(ret_worker)) 137739bbca6fSFabien Thomas wakeup_one(&ret_worker->crp_ret_q); /* shared wait channel */ 137839bbca6fSFabien Thomas TAILQ_INSERT_TAIL(&ret_worker->crp_ret_kq, krp, krp_next); 137939bbca6fSFabien Thomas CRYPTO_RETW_UNLOCK(ret_worker); 1380091d81d1SSam Leffler } 1381091d81d1SSam Leffler 1382091d81d1SSam Leffler int 1383091d81d1SSam Leffler crypto_getfeat(int *featp) 1384091d81d1SSam Leffler { 1385091d81d1SSam Leffler int hid, kalg, feat = 0; 1386091d81d1SSam Leffler 1387091d81d1SSam Leffler CRYPTO_DRIVER_LOCK(); 1388091d81d1SSam Leffler for (hid = 0; hid < crypto_drivers_num; hid++) { 13896810ad6fSSam Leffler const struct cryptocap *cap = &crypto_drivers[hid]; 13906810ad6fSSam Leffler 13916810ad6fSSam Leffler if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) && 1392091d81d1SSam Leffler !crypto_devallowsoft) { 1393091d81d1SSam Leffler continue; 1394091d81d1SSam Leffler } 1395091d81d1SSam Leffler for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++) 13966810ad6fSSam Leffler if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED) 1397091d81d1SSam Leffler feat |= 1 << kalg; 1398091d81d1SSam Leffler } 1399091d81d1SSam Leffler CRYPTO_DRIVER_UNLOCK(); 1400091d81d1SSam Leffler *featp = feat; 1401091d81d1SSam Leffler return (0); 1402091d81d1SSam Leffler } 1403091d81d1SSam Leffler 140451e45326SSam Leffler /* 140551e45326SSam Leffler * Terminate a thread at module unload. The process that 140651e45326SSam Leffler * initiated this is waiting for us to signal that we're gone; 140751e45326SSam Leffler * wake it up and exit. We use the driver table lock to insure 140851e45326SSam Leffler * we don't do the wakeup before they're waiting. There is no 140951e45326SSam Leffler * race here because the waiter sleeps on the proc lock for the 141051e45326SSam Leffler * thread so it gets notified at the right time because of an 141151e45326SSam Leffler * extra wakeup that's done in exit1(). 141251e45326SSam Leffler */ 1413091d81d1SSam Leffler static void 141451e45326SSam Leffler crypto_finis(void *chan) 1415091d81d1SSam Leffler { 141651e45326SSam Leffler CRYPTO_DRIVER_LOCK(); 141751e45326SSam Leffler wakeup_one(chan); 141851e45326SSam Leffler CRYPTO_DRIVER_UNLOCK(); 14193745c395SJulian Elischer kproc_exit(0); 1420091d81d1SSam Leffler } 1421091d81d1SSam Leffler 1422091d81d1SSam Leffler /* 14231a91ccccSSam Leffler * Crypto thread, dispatches crypto requests. 
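 *
 * The thread scans the symmetric (crp_q) and asymmetric (crp_kq)
 * queues for requests whose drivers are not blocked, passes the
 * CRYPTO_HINT_MORE hint when more work for the same driver is
 * already queued, and sleeps on crp_q when there is nothing left
 * to dispatch.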
1424091d81d1SSam Leffler */ 1425091d81d1SSam Leffler static void 1426091d81d1SSam Leffler crypto_proc(void) 1427091d81d1SSam Leffler { 14281a91ccccSSam Leffler struct cryptop *crp, *submit; 14291a91ccccSSam Leffler struct cryptkop *krp; 1430091d81d1SSam Leffler struct cryptocap *cap; 14314acae0acSPawel Jakub Dawidek u_int32_t hid; 1432091d81d1SSam Leffler int result, hint; 1433091d81d1SSam Leffler 14346ed982a2SAndrew Turner #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) 143504c49e68SKonstantin Belousov fpu_kern_thread(FPU_KERN_NORMAL); 143604c49e68SKonstantin Belousov #endif 143704c49e68SKonstantin Belousov 14381a91ccccSSam Leffler CRYPTO_Q_LOCK(); 1439091d81d1SSam Leffler for (;;) { 1440091d81d1SSam Leffler /* 1441091d81d1SSam Leffler * Find the first element in the queue that can be 1442091d81d1SSam Leffler * processed and look-ahead to see if multiple ops 1443091d81d1SSam Leffler * are ready for the same driver. 1444091d81d1SSam Leffler */ 1445091d81d1SSam Leffler submit = NULL; 1446091d81d1SSam Leffler hint = 0; 1447091d81d1SSam Leffler TAILQ_FOREACH(crp, &crp_q, crp_next) { 14484acae0acSPawel Jakub Dawidek hid = CRYPTO_SESID2HID(crp->crp_sid); 1449091d81d1SSam Leffler cap = crypto_checkdriver(hid); 14504acae0acSPawel Jakub Dawidek /* 14514acae0acSPawel Jakub Dawidek * Driver cannot disappear while there is an active 14524acae0acSPawel Jakub Dawidek * session. 14534acae0acSPawel Jakub Dawidek */ 1454c3c82036SPawel Jakub Dawidek KASSERT(cap != NULL, ("%s:%u Driver disappeared.", 1455c3c82036SPawel Jakub Dawidek __func__, __LINE__)); 14566810ad6fSSam Leffler if (cap == NULL || cap->cc_dev == NULL) { 1457091d81d1SSam Leffler /* Op needs to be migrated, process it. */ 1458091d81d1SSam Leffler if (submit == NULL) 1459091d81d1SSam Leffler submit = crp; 1460091d81d1SSam Leffler break; 1461091d81d1SSam Leffler } 1462091d81d1SSam Leffler if (!cap->cc_qblocked) { 1463091d81d1SSam Leffler if (submit != NULL) { 1464091d81d1SSam Leffler /* 1465091d81d1SSam Leffler * We stop on finding another op, 1466091d81d1SSam Leffler * regardless of whether it's for the same 1467091d81d1SSam Leffler * driver or not. We could keep 1468091d81d1SSam Leffler * searching the queue but it might be 1469091d81d1SSam Leffler * better to just use a per-driver 1470091d81d1SSam Leffler * queue instead. 
1471091d81d1SSam Leffler */ 147207d0c94aSSam Leffler if (CRYPTO_SESID2HID(submit->crp_sid) == hid) 1473091d81d1SSam Leffler hint = CRYPTO_HINT_MORE; 1474091d81d1SSam Leffler break; 1475091d81d1SSam Leffler } else { 1476091d81d1SSam Leffler submit = crp; 1477eb73a605SSam Leffler if ((submit->crp_flags & CRYPTO_F_BATCH) == 0) 1478091d81d1SSam Leffler break; 1479091d81d1SSam Leffler /* keep scanning for more that are q'd */ 1480091d81d1SSam Leffler } 1481091d81d1SSam Leffler } 1482091d81d1SSam Leffler } 1483091d81d1SSam Leffler if (submit != NULL) { 1484091d81d1SSam Leffler TAILQ_REMOVE(&crp_q, submit, crp_next); 14854acae0acSPawel Jakub Dawidek hid = CRYPTO_SESID2HID(submit->crp_sid); 14864acae0acSPawel Jakub Dawidek cap = crypto_checkdriver(hid); 1487c3c82036SPawel Jakub Dawidek KASSERT(cap != NULL, ("%s:%u Driver disappeared.", 1488c3c82036SPawel Jakub Dawidek __func__, __LINE__)); 14894acae0acSPawel Jakub Dawidek result = crypto_invoke(cap, submit, hint); 1490091d81d1SSam Leffler if (result == ERESTART) { 1491091d81d1SSam Leffler /* 1492091d81d1SSam Leffler * The driver ran out of resources, mark the 1493091d81d1SSam Leffler * driver ``blocked'' for cryptop's and put 1494091d81d1SSam Leffler * the request back in the queue. It would 1495091d81d1SSam Leffler * be best to put the request back where we got 1496091d81d1SSam Leffler * it but that's hard so for now we put it 1497091d81d1SSam Leffler * at the front. This should be ok; putting 1498091d81d1SSam Leffler * it at the end does not work. 1499091d81d1SSam Leffler */ 1500091d81d1SSam Leffler /* XXX validate sid again? */ 150107d0c94aSSam Leffler crypto_drivers[CRYPTO_SESID2HID(submit->crp_sid)].cc_qblocked = 1; 1502091d81d1SSam Leffler TAILQ_INSERT_HEAD(&crp_q, submit, crp_next); 15037d1853eeSSam Leffler cryptostats.cs_blocks++; 1504091d81d1SSam Leffler } 1505091d81d1SSam Leffler } 1506091d81d1SSam Leffler 1507091d81d1SSam Leffler /* As above, but for key ops */ 1508091d81d1SSam Leffler TAILQ_FOREACH(krp, &crp_kq, krp_next) { 1509091d81d1SSam Leffler cap = crypto_checkdriver(krp->krp_hid); 15106810ad6fSSam Leffler if (cap == NULL || cap->cc_dev == NULL) { 15116810ad6fSSam Leffler /* 15126810ad6fSSam Leffler * Operation needs to be migrated, invalidate 15136810ad6fSSam Leffler * the assigned device so it will reselect a 15146810ad6fSSam Leffler * new one below. Propagate the original 15156810ad6fSSam Leffler * crid selection flags if supplied. 15166810ad6fSSam Leffler */ 15176810ad6fSSam Leffler krp->krp_hid = krp->krp_crid & 15186810ad6fSSam Leffler (CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE); 15196810ad6fSSam Leffler if (krp->krp_hid == 0) 15206810ad6fSSam Leffler krp->krp_hid = 15216810ad6fSSam Leffler CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE; 1522091d81d1SSam Leffler break; 1523091d81d1SSam Leffler } 1524091d81d1SSam Leffler if (!cap->cc_kqblocked) 1525091d81d1SSam Leffler break; 1526091d81d1SSam Leffler } 1527091d81d1SSam Leffler if (krp != NULL) { 1528091d81d1SSam Leffler TAILQ_REMOVE(&crp_kq, krp, krp_next); 15296810ad6fSSam Leffler result = crypto_kinvoke(krp, krp->krp_hid); 1530091d81d1SSam Leffler if (result == ERESTART) { 1531091d81d1SSam Leffler /* 1532091d81d1SSam Leffler * The driver ran out of resources, mark the 1533091d81d1SSam Leffler * driver ``blocked'' for cryptkop's and put 1534091d81d1SSam Leffler * the request back in the queue. It would 1535091d81d1SSam Leffler * be best to put the request back where we got 1536091d81d1SSam Leffler * it but that's hard so for now we put it 1537091d81d1SSam Leffler * at the front. 
This should be ok; putting 1538091d81d1SSam Leffler * it at the end does not work. 1539091d81d1SSam Leffler */ 1540091d81d1SSam Leffler /* XXX validate sid again? */ 1541091d81d1SSam Leffler crypto_drivers[krp->krp_hid].cc_kqblocked = 1; 1542091d81d1SSam Leffler TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next); 15437d1853eeSSam Leffler cryptostats.cs_kblocks++; 1544091d81d1SSam Leffler } 1545091d81d1SSam Leffler } 1546091d81d1SSam Leffler 15471a91ccccSSam Leffler if (submit == NULL && krp == NULL) { 1548091d81d1SSam Leffler /* 1549091d81d1SSam Leffler * Nothing more to be processed. Sleep until we're 1550091d81d1SSam Leffler * woken because there are more ops to process. 1551091d81d1SSam Leffler * This happens either by submission or by a driver 1552091d81d1SSam Leffler * becoming unblocked and notifying us through 1553091d81d1SSam Leffler * crypto_unblock. Note that when we wakeup we 1554091d81d1SSam Leffler * start processing each queue again from the 1555091d81d1SSam Leffler * front. It's not clear that it's important to 1556091d81d1SSam Leffler * preserve this ordering since ops may finish 1557091d81d1SSam Leffler * out of order if dispatched to different devices 1558091d81d1SSam Leffler * and some become blocked while others do not. 1559091d81d1SSam Leffler */ 15603a865c82SPawel Jakub Dawidek crp_sleep = 1; 15611a91ccccSSam Leffler msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0); 15623a865c82SPawel Jakub Dawidek crp_sleep = 0; 156351e45326SSam Leffler if (cryptoproc == NULL) 156451e45326SSam Leffler break; 15657d1853eeSSam Leffler cryptostats.cs_intrs++; 1566091d81d1SSam Leffler } 1567091d81d1SSam Leffler } 156851e45326SSam Leffler CRYPTO_Q_UNLOCK(); 15691a91ccccSSam Leffler 157051e45326SSam Leffler crypto_finis(&crp_q); 15711a91ccccSSam Leffler } 15721a91ccccSSam Leffler 15731a91ccccSSam Leffler /* 15741a91ccccSSam Leffler * Crypto returns thread, does callbacks for processed crypto requests. 15751a91ccccSSam Leffler * Callbacks are done here, rather than in the crypto drivers, because 15761a91ccccSSam Leffler * callbacks typically are expensive and would slow interrupt handling. 
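 *
 * Each return worker drains its ordered queue strictly in crp_seq
 * order and its plain return queue in FIFO order, dropping the
 * worker lock while the callbacks run.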
15771a91ccccSSam Leffler */ 15781a91ccccSSam Leffler static void 157939bbca6fSFabien Thomas crypto_ret_proc(struct crypto_ret_worker *ret_worker) 15801a91ccccSSam Leffler { 15811a91ccccSSam Leffler struct cryptop *crpt; 15821a91ccccSSam Leffler struct cryptkop *krpt; 15831a91ccccSSam Leffler 158439bbca6fSFabien Thomas CRYPTO_RETW_LOCK(ret_worker); 15851a91ccccSSam Leffler for (;;) { 15861a91ccccSSam Leffler /* Harvest return q's for completed ops */ 158739bbca6fSFabien Thomas crpt = TAILQ_FIRST(&ret_worker->crp_ordered_ret_q); 158839bbca6fSFabien Thomas if (crpt != NULL) { 158939bbca6fSFabien Thomas if (crpt->crp_seq == ret_worker->reorder_cur_seq) { 159039bbca6fSFabien Thomas TAILQ_REMOVE(&ret_worker->crp_ordered_ret_q, crpt, crp_next); 159139bbca6fSFabien Thomas ret_worker->reorder_cur_seq++; 159239bbca6fSFabien Thomas } else { 159339bbca6fSFabien Thomas crpt = NULL; 159439bbca6fSFabien Thomas } 159539bbca6fSFabien Thomas } 15961a91ccccSSam Leffler 159739bbca6fSFabien Thomas if (crpt == NULL) { 159839bbca6fSFabien Thomas crpt = TAILQ_FIRST(&ret_worker->crp_ret_q); 159939bbca6fSFabien Thomas if (crpt != NULL) 160039bbca6fSFabien Thomas TAILQ_REMOVE(&ret_worker->crp_ret_q, crpt, crp_next); 160139bbca6fSFabien Thomas } 160239bbca6fSFabien Thomas 160339bbca6fSFabien Thomas krpt = TAILQ_FIRST(&ret_worker->crp_ret_kq); 16041a91ccccSSam Leffler if (krpt != NULL) 160539bbca6fSFabien Thomas TAILQ_REMOVE(&ret_worker->crp_ret_kq, krpt, krp_next); 16061a91ccccSSam Leffler 16071a91ccccSSam Leffler if (crpt != NULL || krpt != NULL) { 160839bbca6fSFabien Thomas CRYPTO_RETW_UNLOCK(ret_worker); 16091a91ccccSSam Leffler /* 16101a91ccccSSam Leffler * Run callbacks unlocked. 16111a91ccccSSam Leffler */ 16127d1853eeSSam Leffler if (crpt != NULL) { 16137d1853eeSSam Leffler #ifdef CRYPTO_TIMING 16147d1853eeSSam Leffler if (crypto_timing) { 16157d1853eeSSam Leffler /* 16167d1853eeSSam Leffler * NB: We must copy the timestamp before 16177d1853eeSSam Leffler * doing the callback as the cryptop is 16187d1853eeSSam Leffler * likely to be reclaimed. 16197d1853eeSSam Leffler */ 16207d1853eeSSam Leffler struct bintime t = crpt->crp_tstamp; 16217d1853eeSSam Leffler crypto_tstat(&cryptostats.cs_cb, &t); 16221a91ccccSSam Leffler crpt->crp_callback(crpt); 16237d1853eeSSam Leffler crypto_tstat(&cryptostats.cs_finis, &t); 16247d1853eeSSam Leffler } else 16257d1853eeSSam Leffler #endif 16267d1853eeSSam Leffler crpt->crp_callback(crpt); 16277d1853eeSSam Leffler } 16281a91ccccSSam Leffler if (krpt != NULL) 16291a91ccccSSam Leffler krpt->krp_callback(krpt); 163039bbca6fSFabien Thomas CRYPTO_RETW_LOCK(ret_worker); 16311a91ccccSSam Leffler } else { 16321a91ccccSSam Leffler /* 16331a91ccccSSam Leffler * Nothing more to be processed. Sleep until we're 16341a91ccccSSam Leffler * woken because there are more returns to process. 
16351a91ccccSSam Leffler */ 163639bbca6fSFabien Thomas msleep(&ret_worker->crp_ret_q, &ret_worker->crypto_ret_mtx, PWAIT, 16371a91ccccSSam Leffler "crypto_ret_wait", 0); 163839bbca6fSFabien Thomas if (ret_worker->cryptoretproc == NULL) 163951e45326SSam Leffler break; 16407d1853eeSSam Leffler cryptostats.cs_rets++; 16411a91ccccSSam Leffler } 16421a91ccccSSam Leffler } 164339bbca6fSFabien Thomas CRYPTO_RETW_UNLOCK(ret_worker); 164451e45326SSam Leffler 164539bbca6fSFabien Thomas crypto_finis(&ret_worker->crp_ret_q); 16461a91ccccSSam Leffler } 16476810ad6fSSam Leffler 16486810ad6fSSam Leffler #ifdef DDB 16496810ad6fSSam Leffler static void 16506810ad6fSSam Leffler db_show_drivers(void) 16516810ad6fSSam Leffler { 16526810ad6fSSam Leffler int hid; 16536810ad6fSSam Leffler 16546810ad6fSSam Leffler db_printf("%12s %4s %4s %8s %2s %2s\n" 16556810ad6fSSam Leffler , "Device" 16566810ad6fSSam Leffler , "Ses" 16576810ad6fSSam Leffler , "Kops" 16586810ad6fSSam Leffler , "Flags" 16596810ad6fSSam Leffler , "QB" 16606810ad6fSSam Leffler , "KB" 16616810ad6fSSam Leffler ); 16626810ad6fSSam Leffler for (hid = 0; hid < crypto_drivers_num; hid++) { 16636810ad6fSSam Leffler const struct cryptocap *cap = &crypto_drivers[hid]; 16646810ad6fSSam Leffler if (cap->cc_dev == NULL) 16656810ad6fSSam Leffler continue; 16666810ad6fSSam Leffler db_printf("%-12s %4u %4u %08x %2u %2u\n" 16676810ad6fSSam Leffler , device_get_nameunit(cap->cc_dev) 16686810ad6fSSam Leffler , cap->cc_sessions 16696810ad6fSSam Leffler , cap->cc_koperations 16706810ad6fSSam Leffler , cap->cc_flags 16716810ad6fSSam Leffler , cap->cc_qblocked 16726810ad6fSSam Leffler , cap->cc_kqblocked 16736810ad6fSSam Leffler ); 16746810ad6fSSam Leffler } 16756810ad6fSSam Leffler } 16766810ad6fSSam Leffler 16776810ad6fSSam Leffler DB_SHOW_COMMAND(crypto, db_show_crypto) 16786810ad6fSSam Leffler { 16796810ad6fSSam Leffler struct cryptop *crp; 168039bbca6fSFabien Thomas struct crypto_ret_worker *ret_worker; 16816810ad6fSSam Leffler 16826810ad6fSSam Leffler db_show_drivers(); 16836810ad6fSSam Leffler db_printf("\n"); 16846810ad6fSSam Leffler 16856810ad6fSSam Leffler db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n", 16866810ad6fSSam Leffler "HID", "Caps", "Ilen", "Olen", "Etype", "Flags", 16876810ad6fSSam Leffler "Desc", "Callback"); 16886810ad6fSSam Leffler TAILQ_FOREACH(crp, &crp_q, crp_next) { 16896810ad6fSSam Leffler db_printf("%4u %08x %4u %4u %4u %04x %8p %8p\n" 16906810ad6fSSam Leffler , (int) CRYPTO_SESID2HID(crp->crp_sid) 16916810ad6fSSam Leffler , (int) CRYPTO_SESID2CAPS(crp->crp_sid) 16926810ad6fSSam Leffler , crp->crp_ilen, crp->crp_olen 16936810ad6fSSam Leffler , crp->crp_etype 16946810ad6fSSam Leffler , crp->crp_flags 16956810ad6fSSam Leffler , crp->crp_desc 16966810ad6fSSam Leffler , crp->crp_callback 16976810ad6fSSam Leffler ); 16986810ad6fSSam Leffler } 169939bbca6fSFabien Thomas FOREACH_CRYPTO_RETW(ret_worker) { 170039bbca6fSFabien Thomas db_printf("\n%8s %4s %4s %4s %8s\n", 170139bbca6fSFabien Thomas "ret_worker", "HID", "Etype", "Flags", "Callback"); 170239bbca6fSFabien Thomas if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) { 170339bbca6fSFabien Thomas TAILQ_FOREACH(crp, &ret_worker->crp_ret_q, crp_next) { 170439bbca6fSFabien Thomas db_printf("%8td %4u %4u %04x %8p\n" 170539bbca6fSFabien Thomas , CRYPTO_RETW_ID(ret_worker) 17066810ad6fSSam Leffler , (int) CRYPTO_SESID2HID(crp->crp_sid) 17076810ad6fSSam Leffler , crp->crp_etype 17086810ad6fSSam Leffler , crp->crp_flags 17096810ad6fSSam Leffler , crp->crp_callback 17106810ad6fSSam Leffler ); 17116810ad6fSSam 
Leffler } 17126810ad6fSSam Leffler } 17136810ad6fSSam Leffler } 171439bbca6fSFabien Thomas } 17156810ad6fSSam Leffler 17166810ad6fSSam Leffler DB_SHOW_COMMAND(kcrypto, db_show_kcrypto) 17176810ad6fSSam Leffler { 17186810ad6fSSam Leffler struct cryptkop *krp; 171939bbca6fSFabien Thomas struct crypto_ret_worker *ret_worker; 17206810ad6fSSam Leffler 17216810ad6fSSam Leffler db_show_drivers(); 17226810ad6fSSam Leffler db_printf("\n"); 17236810ad6fSSam Leffler 17246810ad6fSSam Leffler db_printf("%4s %5s %4s %4s %8s %4s %8s\n", 17256810ad6fSSam Leffler "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback"); 17266810ad6fSSam Leffler TAILQ_FOREACH(krp, &crp_kq, krp_next) { 17276810ad6fSSam Leffler db_printf("%4u %5u %4u %4u %08x %4u %8p\n" 17286810ad6fSSam Leffler , krp->krp_op 17296810ad6fSSam Leffler , krp->krp_status 17306810ad6fSSam Leffler , krp->krp_iparams, krp->krp_oparams 17316810ad6fSSam Leffler , krp->krp_crid, krp->krp_hid 17326810ad6fSSam Leffler , krp->krp_callback 17336810ad6fSSam Leffler ); 17346810ad6fSSam Leffler } 173539bbca6fSFabien Thomas 173639bbca6fSFabien Thomas ret_worker = CRYPTO_RETW(0); 173739bbca6fSFabien Thomas if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) { 17386810ad6fSSam Leffler db_printf("%4s %5s %8s %4s %8s\n", 17396810ad6fSSam Leffler "Op", "Status", "CRID", "HID", "Callback"); 174039bbca6fSFabien Thomas TAILQ_FOREACH(krp, &ret_worker->crp_ret_kq, krp_next) { 17416810ad6fSSam Leffler db_printf("%4u %5u %08x %4u %8p\n" 17426810ad6fSSam Leffler , krp->krp_op 17436810ad6fSSam Leffler , krp->krp_status 17446810ad6fSSam Leffler , krp->krp_crid, krp->krp_hid 17456810ad6fSSam Leffler , krp->krp_callback 17466810ad6fSSam Leffler ); 17476810ad6fSSam Leffler } 17486810ad6fSSam Leffler } 17496810ad6fSSam Leffler } 17506810ad6fSSam Leffler #endif 17516810ad6fSSam Leffler 17526810ad6fSSam Leffler int crypto_modevent(module_t mod, int type, void *unused); 17536810ad6fSSam Leffler 17546810ad6fSSam Leffler /* 17556810ad6fSSam Leffler * Initialization code, both for static and dynamic loading. 17566810ad6fSSam Leffler * Note this is not invoked with the usual MODULE_DECLARE 17576810ad6fSSam Leffler * mechanism but instead is listed as a dependency by the 17586810ad6fSSam Leffler * cryptosoft driver. This guarantees proper ordering of 17596810ad6fSSam Leffler * calls on module load/unload. 17606810ad6fSSam Leffler */ 17616810ad6fSSam Leffler int 17626810ad6fSSam Leffler crypto_modevent(module_t mod, int type, void *unused) 17636810ad6fSSam Leffler { 17646810ad6fSSam Leffler int error = EINVAL; 17656810ad6fSSam Leffler 17666810ad6fSSam Leffler switch (type) { 17676810ad6fSSam Leffler case MOD_LOAD: 17686810ad6fSSam Leffler error = crypto_init(); 17696810ad6fSSam Leffler if (error == 0 && bootverbose) 17706810ad6fSSam Leffler printf("crypto: <crypto core>\n"); 17716810ad6fSSam Leffler break; 17726810ad6fSSam Leffler case MOD_UNLOAD: 17736810ad6fSSam Leffler /*XXX disallow if active sessions */ 17746810ad6fSSam Leffler error = 0; 17756810ad6fSSam Leffler crypto_destroy(); 17766810ad6fSSam Leffler return 0; 17776810ad6fSSam Leffler } 17786810ad6fSSam Leffler return error; 17796810ad6fSSam Leffler } 17806810ad6fSSam Leffler MODULE_VERSION(crypto, 1); 17816810ad6fSSam Leffler MODULE_DEPEND(crypto, zlib, 1, 1, 1); 1782
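
/*
 * Illustrative sketch only (not part of the framework): a minimal
 * consumer of the request life cycle implemented above, under the
 * assumption that crypto_dispatch() is the usual submission entry
 * point from crypto(9) and that crp_buf carries the data to be
 * transformed.  The example_cb() and example_submit() names, the
 * session id argument, and the descriptor setup are hypothetical
 * placeholders.
 *
 *	static int
 *	example_cb(struct cryptop *crp)
 *	{
 *		if (crp->crp_etype == EAGAIN) {
 *			// session was migrated; resubmit the request
 *			crp->crp_etype = 0;
 *			return (crypto_dispatch(crp));
 *		}
 *		// ...consume the result, then release the descriptors
 *		crypto_freereq(crp);
 *		return (0);
 *	}
 *
 *	static int
 *	example_submit(u_int64_t sid, caddr_t buf, int len)
 *	{
 *		struct cryptop *crp;
 *
 *		crp = crypto_getreq(1);		// one cryptodesc
 *		if (crp == NULL)
 *			return (ENOMEM);
 *		crp->crp_sid = sid;		// from crypto_newsession()
 *		crp->crp_ilen = len;
 *		crp->crp_buf = buf;
 *		crp->crp_flags = CRYPTO_F_CBIFSYNC;
 *		crp->crp_callback = example_cb;
 *		// crp->crp_desc: fill in crd_alg, crd_skip, crd_len, ...
 *		return (crypto_dispatch(crp));
 *	}
 */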