/*-
 * Copyright (c) 2002-2006 Sam Leffler.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Cryptographic Subsystem.
 *
 * This code is derived from the Openbsd Cryptographic Framework (OCF)
 * that has the copyright shown below.  Very little of the original
 * code remains.
 */

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#define	CRYPTO_TIMING				/* enable timing support */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

#include <ddb/ddb.h>

#include <vm/uma.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>			/* XXX for M_XDATA */

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

/*
 * Crypto drivers register themselves by allocating a slot in the
 * crypto_drivers table with crypto_get_driverid() and then registering
 * each algorithm they support with crypto_register() and crypto_kregister().
 */
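/*
 * Illustrative sketch: a driver attach routine might allocate its slot
 * and advertise algorithms roughly as follows (the device and algorithm
 * list here are hypothetical):
 *
 *	int32_t cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
 *	if (cid < 0)
 *		return (ENXIO);
 *	crypto_register(cid, CRYPTO_AES_CBC, 0, 0);
 *	crypto_register(cid, CRYPTO_SHA1_HMAC, 0, 0);
 *
 * crypto_unregister_all(cid) undoes the registration at detach time.
 */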
static	struct mtx crypto_drivers_mtx;		/* lock on driver table */
#define	CRYPTO_DRIVER_LOCK()	mtx_lock(&crypto_drivers_mtx)
#define	CRYPTO_DRIVER_UNLOCK()	mtx_unlock(&crypto_drivers_mtx)
#define	CRYPTO_DRIVER_ASSERT()	mtx_assert(&crypto_drivers_mtx, MA_OWNED)

/*
 * Crypto device/driver capabilities structure.
 *
 * Synchronization:
 *	(d) - protected by CRYPTO_DRIVER_LOCK()
 *	(q) - protected by CRYPTO_Q_LOCK()
 *	Not tagged fields are read-only.
 */
struct cryptocap {
	device_t	cc_dev;			/* (d) device/driver */
	u_int32_t	cc_sessions;		/* (d) # of sessions */
	u_int32_t	cc_koperations;		/* (d) # of asym operations */
	/*
	 * Largest possible operator length (in bits) for each type of
	 * encryption algorithm. XXX not used
	 */
	u_int16_t	cc_max_op_len[CRYPTO_ALGORITHM_MAX + 1];
	u_int8_t	cc_alg[CRYPTO_ALGORITHM_MAX + 1];
	u_int8_t	cc_kalg[CRK_ALGORITHM_MAX + 1];

	int		cc_flags;		/* (d) flags */
#define	CRYPTOCAP_F_CLEANUP	0x80000000	/* needs resource cleanup */
	int		cc_qblocked;		/* (q) symmetric q blocked */
	int		cc_kqblocked;		/* (q) asymmetric q blocked */
};
static	struct cryptocap *crypto_drivers = NULL;
static	int crypto_drivers_num = 0;

/*
 * There are two queues for crypto requests; one for symmetric (e.g.
 * cipher) operations and one for asymmetric (e.g. MOD) operations.
 * A single mutex is used to lock access to both queues.  We could
 * have one per-queue but having one simplifies handling of block/unblock
 * operations.
 */
static	int crp_sleep = 0;
static	TAILQ_HEAD(,cryptop) crp_q;		/* request queues */
static	TAILQ_HEAD(,cryptkop) crp_kq;
static	struct mtx crypto_q_mtx;
#define	CRYPTO_Q_LOCK()		mtx_lock(&crypto_q_mtx)
#define	CRYPTO_Q_UNLOCK()	mtx_unlock(&crypto_q_mtx)

/*
 * There are two queues for processing completed crypto requests; one
 * for the symmetric and one for the asymmetric ops.  We only need one
 * but have two to avoid type futzing (cryptop vs. cryptkop).  A single
 * mutex is used to lock access to both queues.  Note that this lock
 * must be separate from the lock on request queues to ensure driver
 * callbacks don't generate lock order reversals.
 */
static	TAILQ_HEAD(,cryptop) crp_ret_q;		/* callback queues */
static	TAILQ_HEAD(,cryptkop) crp_ret_kq;
static	struct mtx crypto_ret_q_mtx;
#define	CRYPTO_RETQ_LOCK()	mtx_lock(&crypto_ret_q_mtx)
#define	CRYPTO_RETQ_UNLOCK()	mtx_unlock(&crypto_ret_q_mtx)
#define	CRYPTO_RETQ_EMPTY()	(TAILQ_EMPTY(&crp_ret_q) && TAILQ_EMPTY(&crp_ret_kq))

static	uma_zone_t cryptop_zone;
static	uma_zone_t cryptodesc_zone;

int	crypto_userasymcrypto = 1;	/* userland may do asym crypto reqs */
SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
	   &crypto_userasymcrypto, 0,
	   "Enable/disable user-mode access to asymmetric crypto support");
int	crypto_devallowsoft = 0;	/* only use hardware crypto for asym */
SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW,
	   &crypto_devallowsoft, 0,
	   "Enable/disable use of software asym crypto support");

MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");

static	void crypto_proc(void);
static	struct proc *cryptoproc;
static	void crypto_ret_proc(void);
static	struct proc *cryptoretproc;
static	void crypto_destroy(void);
static	int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
static	int crypto_kinvoke(struct cryptkop *krp, int flags);

static	struct cryptostats cryptostats;
SYSCTL_STRUCT(_kern, OID_AUTO, crypto_stats, CTLFLAG_RW, &cryptostats,
	    cryptostats, "Crypto system statistics");

#ifdef CRYPTO_TIMING
static	int crypto_timing = 0;
SYSCTL_INT(_debug, OID_AUTO, crypto_timing, CTLFLAG_RW,
	   &crypto_timing, 0, "Enable/disable crypto timing support");
#endif

static int
crypto_init(void)
{
	int error;

	mtx_init(&crypto_drivers_mtx, "crypto", "crypto driver table",
		MTX_DEF|MTX_QUIET);

	TAILQ_INIT(&crp_q);
	TAILQ_INIT(&crp_kq);
	mtx_init(&crypto_q_mtx, "crypto", "crypto op queues", MTX_DEF);

	TAILQ_INIT(&crp_ret_q);
	TAILQ_INIT(&crp_ret_kq);
	mtx_init(&crypto_ret_q_mtx, "crypto", "crypto return queues", MTX_DEF);

	cryptop_zone = uma_zcreate("cryptop", sizeof (struct cryptop),
				    0, 0, 0, 0,
				    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
	cryptodesc_zone = uma_zcreate("cryptodesc", sizeof (struct cryptodesc),
				    0, 0, 0, 0,
				    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
	if (cryptodesc_zone == NULL || cryptop_zone == NULL) {
		printf("crypto_init: cannot setup crypto zones\n");
		error = ENOMEM;
		goto bad;
	}

	crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
	crypto_drivers = malloc(crypto_drivers_num *
	    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
	if (crypto_drivers == NULL) {
		printf("crypto_init: cannot setup crypto drivers\n");
		error = ENOMEM;
		goto bad;
	}

	error = kthread_create((void (*)(void *)) crypto_proc, NULL,
		    &cryptoproc, 0, 0, "crypto");
	if (error) {
		printf("crypto_init: cannot start crypto thread; error %d",
			error);
		goto bad;
	}

	error = kthread_create((void (*)(void *)) crypto_ret_proc, NULL,
		    &cryptoretproc, 0, 0, "crypto returns");
	if (error) {
		printf("crypto_init: cannot start cryptoret thread; error %d",
			error);
		goto bad;
	}
	return 0;
bad:
	crypto_destroy();
	return error;
}

/*
 * Signal a crypto thread to terminate.  We use the driver
 * table lock to synchronize the sleep/wakeups so that we
 * are sure the threads have terminated before we release
 * the data structures they use.  See crypto_finis below
 * for the other half of this song-and-dance.
 */
static void
crypto_terminate(struct proc **pp, void *q)
{
	struct proc *p;

	mtx_assert(&crypto_drivers_mtx, MA_OWNED);
	p = *pp;
	*pp = NULL;
	if (p) {
		wakeup_one(q);
		PROC_LOCK(p);		/* NB: ensure we don't miss wakeup */
		CRYPTO_DRIVER_UNLOCK();	/* let crypto_finis progress */
		msleep(p, &p->p_mtx, PWAIT, "crypto_destroy", 0);
		PROC_UNLOCK(p);
		CRYPTO_DRIVER_LOCK();
	}
}

static void
crypto_destroy(void)
{
	/*
	 * Terminate any crypto threads.
	 */
	CRYPTO_DRIVER_LOCK();
	crypto_terminate(&cryptoproc, &crp_q);
	crypto_terminate(&cryptoretproc, &crp_ret_q);
	CRYPTO_DRIVER_UNLOCK();

	/* XXX flush queues??? */

	/*
	 * Reclaim dynamically allocated resources.
	 */
	if (crypto_drivers != NULL)
		free(crypto_drivers, M_CRYPTO_DATA);

	if (cryptodesc_zone != NULL)
		uma_zdestroy(cryptodesc_zone);
	if (cryptop_zone != NULL)
		uma_zdestroy(cryptop_zone);
	mtx_destroy(&crypto_q_mtx);
	mtx_destroy(&crypto_ret_q_mtx);
	mtx_destroy(&crypto_drivers_mtx);
}

static struct cryptocap *
crypto_checkdriver(u_int32_t hid)
{
	if (crypto_drivers == NULL)
		return NULL;
	return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
}

/*
 * Compare a driver's list of supported algorithms against another
 * list; return non-zero if all algorithms are supported.
 */
static int
driver_suitable(const struct cryptocap *cap, const struct cryptoini *cri)
{
	const struct cryptoini *cr;

	/* See if all the algorithms are supported. */
	for (cr = cri; cr; cr = cr->cri_next)
		if (cap->cc_alg[cr->cri_alg] == 0)
			return 0;
	return 1;
}

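/*
 * Illustrative sketch: driver_suitable() walks the kind of cryptoini
 * chain a consumer might build for combined cipher+auth work; the key
 * buffers below are hypothetical and key lengths are in bits.
 *
 *	struct cryptoini crie, cria;
 *
 *	bzero(&crie, sizeof(crie));
 *	crie.cri_alg = CRYPTO_AES_CBC;
 *	crie.cri_klen = 128;
 *	crie.cri_key = enc_key;
 *	crie.cri_next = &cria;
 *
 *	bzero(&cria, sizeof(cria));
 *	cria.cri_alg = CRYPTO_SHA1_HMAC;
 *	cria.cri_klen = 160;
 *	cria.cri_key = auth_key;
 *
 * A driver qualifies only if cc_alg[] is non-zero for every cri_alg in
 * the chain.
 */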
/*
 * Select a driver for a new session that supports the specified
 * algorithms and, optionally, is constrained according to the flags.
 * The algorithm we use here is pretty stupid; just use the
 * first driver that supports all the algorithms we need. If there
 * are multiple drivers we choose the driver with the fewest active
 * sessions.  We prefer hardware-backed drivers to software ones.
 *
 * XXX We need more smarts here (in real life too, but that's
 * XXX another story altogether).
 */
static struct cryptocap *
crypto_select_driver(const struct cryptoini *cri, int flags)
{
	struct cryptocap *cap, *best;
	int match, hid;

	CRYPTO_DRIVER_ASSERT();

	/*
	 * Look first for hardware crypto devices if permitted.
	 */
	if (flags & CRYPTOCAP_F_HARDWARE)
		match = CRYPTOCAP_F_HARDWARE;
	else
		match = CRYPTOCAP_F_SOFTWARE;
	best = NULL;
again:
	for (hid = 0; hid < crypto_drivers_num; hid++) {
		cap = &crypto_drivers[hid];
		/*
		 * If it's not initialized, is in the process of
		 * going away, or is not appropriate (hardware
		 * or software based on match), then skip.
		 */
		if (cap->cc_dev == NULL ||
		    (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
		    (cap->cc_flags & match) == 0)
			continue;

		/* verify all the algorithms are supported. */
		if (driver_suitable(cap, cri)) {
			if (best == NULL ||
			    cap->cc_sessions < best->cc_sessions)
				best = cap;
		}
	}
	if (best != NULL)
		return best;
	if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
		/* sort of an Algol 68-style for loop */
		match = CRYPTOCAP_F_SOFTWARE;
		goto again;
	}
	return best;
}

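/*
 * Illustrative sketch: the constraint flags used above come from the
 * crid argument of crypto_newsession() below, which may instead name a
 * specific driver (e.g. a crypto_find_driver() result):
 *
 *	error = crypto_newsession(&sid, &crie, CRYPTOCAP_F_HARDWARE);
 *	error = crypto_newsession(&sid, &crie,
 *	    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
 *
 * The first form fails if no hardware driver handles the algorithms;
 * the second tries hardware first and then falls back to software.
 */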
/*
 * Create a new session.  The crid argument specifies a crypto
 * driver to use or constraints on a driver to select (hardware
 * only, software only, either).  Whatever driver is selected
 * must be capable of the requested crypto algorithms.
 */
int
crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int crid)
{
	struct cryptocap *cap;
	u_int32_t hid, lid;
	int err;

	CRYPTO_DRIVER_LOCK();
	if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
		/*
		 * Use specified driver; verify it is capable.
		 */
		cap = crypto_checkdriver(crid);
		if (cap != NULL && !driver_suitable(cap, cri))
			cap = NULL;
	} else {
		/*
		 * No requested driver; select based on crid flags.
		 */
		cap = crypto_select_driver(cri, crid);
		/*
		 * if NULL then can't do everything in one session.
		 * XXX Fix this. We need to inject a "virtual" session
		 * XXX layer right about here.
		 */
	}
	if (cap != NULL) {
		/* Call the driver initialization routine. */
		hid = cap - crypto_drivers;
		lid = hid;		/* Pass the driver ID. */
		err = CRYPTODEV_NEWSESSION(cap->cc_dev, &lid, cri);
		if (err == 0) {
			(*sid) = (cap->cc_flags & 0xff000000)
			       | (hid & 0x00ffffff);
			(*sid) <<= 32;
			(*sid) |= (lid & 0xffffffff);
			cap->cc_sessions++;
		}
	} else
		err = EINVAL;
	CRYPTO_DRIVER_UNLOCK();
	return err;
}

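/*
 * Illustrative sketch of the session id constructed above: the driver
 * capability flags and driver index occupy the upper 32 bits and the
 * driver-private id the lower 32 bits:
 *
 *	 63           56 55            32 31                            0
 *	+---------------+----------------+-------------------------------+
 *	|   cc_flags    |      hid       |              lid              |
 *	+---------------+----------------+-------------------------------+
 *
 * CRYPTO_SESID2HID() and CRYPTO_SESID2CAPS() recover the hid and flag
 * byte from such an id; the low word is meaningful only to the driver.
 */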
static void
crypto_remove(struct cryptocap *cap)
{

	mtx_assert(&crypto_drivers_mtx, MA_OWNED);
	if (cap->cc_sessions == 0 && cap->cc_koperations == 0)
		bzero(cap, sizeof(*cap));
}

/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver).
 */
int
crypto_freesession(u_int64_t sid)
{
	struct cryptocap *cap;
	u_int32_t hid;
	int err;

	CRYPTO_DRIVER_LOCK();

	if (crypto_drivers == NULL) {
		err = EINVAL;
		goto done;
	}

	/* Determine two IDs. */
	hid = CRYPTO_SESID2HID(sid);

	if (hid >= crypto_drivers_num) {
		err = ENOENT;
		goto done;
	}
	cap = &crypto_drivers[hid];

	if (cap->cc_sessions)
		cap->cc_sessions--;

	/* Call the driver cleanup routine, if available. */
	err = CRYPTODEV_FREESESSION(cap->cc_dev, sid);

	if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
		crypto_remove(cap);

done:
	CRYPTO_DRIVER_UNLOCK();
	return err;
}

/*
 * Return an unused driver id.  Used by drivers prior to registering
 * support for the algorithms they handle.
 */
int32_t
crypto_get_driverid(device_t dev, int flags)
{
	struct cryptocap *newdrv;
	int i;

	if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
		printf("%s: no flags specified when registering driver\n",
		    device_get_nameunit(dev));
		return -1;
	}

	CRYPTO_DRIVER_LOCK();

	for (i = 0; i < crypto_drivers_num; i++) {
		if (crypto_drivers[i].cc_dev == NULL &&
		    (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0) {
			break;
		}
	}

	/* Out of entries, allocate some more. */
	if (i == crypto_drivers_num) {
		/* Be careful about wrap-around. */
		if (2 * crypto_drivers_num <= crypto_drivers_num) {
			CRYPTO_DRIVER_UNLOCK();
			printf("crypto: driver count wraparound!\n");
			return -1;
		}

		newdrv = malloc(2 * crypto_drivers_num *
		    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (newdrv == NULL) {
			CRYPTO_DRIVER_UNLOCK();
			printf("crypto: no space to expand driver table!\n");
			return -1;
		}

		bcopy(crypto_drivers, newdrv,
		    crypto_drivers_num * sizeof(struct cryptocap));

		crypto_drivers_num *= 2;

		free(crypto_drivers, M_CRYPTO_DATA);
		crypto_drivers = newdrv;
	}

	/* NB: state is zero'd on free */
	crypto_drivers[i].cc_sessions = 1;	/* Mark */
	crypto_drivers[i].cc_dev = dev;
	crypto_drivers[i].cc_flags = flags;
	if (bootverbose)
		printf("crypto: assign %s driver id %u, flags %u\n",
		    device_get_nameunit(dev), i, flags);

	CRYPTO_DRIVER_UNLOCK();

	return i;
}

/*
 * Lookup a driver by name.  We match against the full device
 * name and unit, and against just the name.  The latter gives
 * us a simple wildcarding by device name.  On success return the
 * driver/hardware identifier; otherwise return -1.
 */
int
crypto_find_driver(const char *match)
{
	int i, len = strlen(match);

	CRYPTO_DRIVER_LOCK();
	for (i = 0; i < crypto_drivers_num; i++) {
		device_t dev = crypto_drivers[i].cc_dev;
		if (dev == NULL ||
		    (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP))
			continue;
		if (strncmp(match, device_get_nameunit(dev), len) == 0 ||
		    strncmp(match, device_get_name(dev), len) == 0)
			break;
	}
	CRYPTO_DRIVER_UNLOCK();
	return i < crypto_drivers_num ? i : -1;
}

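/*
 * Illustrative sketch: because both the full name+unit and the bare
 * name are compared above, a lookup can be as specific or as loose as
 * needed (the driver names here are hypothetical):
 *
 *	int hid;
 *
 *	hid = crypto_find_driver("hifn0");	(exactly unit 0)
 *	hid = crypto_find_driver("hifn");	(any unit of that driver)
 *	if (hid >= 0)
 *		dev = crypto_find_device_byhid(hid);
 */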
/*
 * Return the device_t for the specified driver or NULL
 * if the driver identifier is invalid.
 */
device_t
crypto_find_device_byhid(int hid)
{
	struct cryptocap *cap = crypto_checkdriver(hid);
	return cap != NULL ? cap->cc_dev : NULL;
}

/*
 * Return the device/driver capabilities.
 */
int
crypto_getcaps(int hid)
{
	struct cryptocap *cap = crypto_checkdriver(hid);
	return cap != NULL ? cap->cc_flags : 0;
}

/*
 * Register support for a key-related algorithm.  This routine
 * is called once for each algorithm supported by a driver.
 */
int
crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags)
{
	struct cryptocap *cap;
	int err;

	CRYPTO_DRIVER_LOCK();

	cap = crypto_checkdriver(driverid);
	if (cap != NULL &&
	    (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		if (bootverbose)
			printf("crypto: %s registers key alg %u flags %u\n"
				, device_get_nameunit(cap->cc_dev)
				, kalg
				, flags
			);
		err = 0;
	} else
		err = EINVAL;

	CRYPTO_DRIVER_UNLOCK();
	return err;
}

/*
 * Register support for a non-key-related algorithm.  This routine
 * is called once for each such algorithm supported by a driver.
 */
int
crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
    u_int32_t flags)
{
	struct cryptocap *cap;
	int err;

	CRYPTO_DRIVER_LOCK();

	cap = crypto_checkdriver(driverid);
	/* NB: algorithms are in the range [1..max] */
	if (cap != NULL &&
	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		cap->cc_max_op_len[alg] = maxoplen;
		if (bootverbose)
			printf("crypto: %s registers alg %u flags %u maxoplen %u\n"
				, device_get_nameunit(cap->cc_dev)
				, alg
				, flags
				, maxoplen
			);
		cap->cc_sessions = 0;		/* Unmark */
		err = 0;
	} else
		err = EINVAL;

	CRYPTO_DRIVER_UNLOCK();
	return err;
}

static void
driver_finis(struct cryptocap *cap)
{
	u_int32_t ses, kops;

	CRYPTO_DRIVER_ASSERT();

	ses = cap->cc_sessions;
	kops = cap->cc_koperations;
	bzero(cap, sizeof(*cap));
	if (ses != 0 || kops != 0) {
		/*
		 * If there are pending sessions,
		 * just mark as invalid.
		 */
		cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
		cap->cc_sessions = ses;
		cap->cc_koperations = kops;
	}
}

/*
 * Unregister a crypto driver. If there are pending sessions using it,
 * leave enough information around so that subsequent calls using those
 * sessions will correctly detect the driver has been unregistered and
 * reroute requests.
 */
int
crypto_unregister(u_int32_t driverid, int alg)
{
	struct cryptocap *cap;
	int i, err;

	CRYPTO_DRIVER_LOCK();
	cap = crypto_checkdriver(driverid);
	if (cap != NULL &&
	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
	    cap->cc_alg[alg] != 0) {
		cap->cc_alg[alg] = 0;
		cap->cc_max_op_len[alg] = 0;

		/* Was this the last algorithm ? */
		for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
			if (cap->cc_alg[i] != 0)
				break;

		if (i == CRYPTO_ALGORITHM_MAX + 1)
			driver_finis(cap);
		err = 0;
	} else
		err = EINVAL;
	CRYPTO_DRIVER_UNLOCK();

	return err;
}

/*
 * Unregister all algorithms associated with a crypto driver.
 * If there are pending sessions using it, leave enough information
 * around so that subsequent calls using those sessions will
 * correctly detect the driver has been unregistered and reroute
 * requests.
 */
int
crypto_unregister_all(u_int32_t driverid)
{
	struct cryptocap *cap;
	int err;

	CRYPTO_DRIVER_LOCK();
	cap = crypto_checkdriver(driverid);
	if (cap != NULL) {
		driver_finis(cap);
		err = 0;
	} else
		err = EINVAL;
	CRYPTO_DRIVER_UNLOCK();

	return err;
}

/*
 * Clear blockage on a driver.  The what parameter indicates whether
 * the driver is now ready for cryptop's and/or cryptokop's.
 */
int
crypto_unblock(u_int32_t driverid, int what)
{
	struct cryptocap *cap;
	int err;

	CRYPTO_Q_LOCK();
	cap = crypto_checkdriver(driverid);
	if (cap != NULL) {
		if (what & CRYPTO_SYMQ)
			cap->cc_qblocked = 0;
		if (what & CRYPTO_ASYMQ)
			cap->cc_kqblocked = 0;
		if (crp_sleep)
			wakeup_one(&crp_q);
		err = 0;
	} else
		err = EINVAL;
	CRYPTO_Q_UNLOCK();

	return err;
}

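/*
 * Illustrative sketch: a driver whose process method returned ERESTART
 * (leaving its queue marked blocked) would typically call this from its
 * completion interrupt once resources free up; "sc" is a hypothetical
 * softc holding the id obtained from crypto_get_driverid():
 *
 *	crypto_unblock(sc->sc_cid, CRYPTO_SYMQ | CRYPTO_ASYMQ);
 *
 * This clears the blocked flags and wakes the crypto thread so queued
 * requests are retried.
 */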
/*
 * Add a crypto request to a queue, to be processed by the kernel thread.
 */
int
crypto_dispatch(struct cryptop *crp)
{
	struct cryptocap *cap;
	u_int32_t hid;
	int result;

	cryptostats.cs_ops++;

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		binuptime(&crp->crp_tstamp);
#endif

	hid = CRYPTO_SESID2HID(crp->crp_sid);

	if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
		/*
		 * Caller marked the request to be processed
		 * immediately; dispatch it directly to the
		 * driver unless the driver is currently blocked.
		 */
		cap = crypto_checkdriver(hid);
		/* Driver cannot disappear when there is an active session. */
		KASSERT(cap != NULL, ("%s: Driver disappeared.", __func__));
		if (!cap->cc_qblocked) {
			result = crypto_invoke(cap, crp, 0);
			if (result != ERESTART)
				return (result);
			/*
			 * The driver ran out of resources, put the request on
			 * the queue.
			 */
		}
	}
	CRYPTO_Q_LOCK();
	TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
	if (crp_sleep)
		wakeup_one(&crp_q);
	CRYPTO_Q_UNLOCK();
	return 0;
}

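/*
 * Illustrative sketch: a consumer with an established session might
 * submit an mbuf-based encryption request like this; "sid", "m", "len"
 * and "cb" are hypothetical:
 *
 *	struct cryptop *crp = crypto_getreq(1);
 *	if (crp == NULL)
 *		return (ENOMEM);
 *	crp->crp_sid = sid;
 *	crp->crp_ilen = len;
 *	crp->crp_flags = CRYPTO_F_IMBUF;
 *	crp->crp_buf = (caddr_t) m;
 *	crp->crp_callback = cb;
 *	crp->crp_desc->crd_alg = CRYPTO_AES_CBC;
 *	crp->crp_desc->crd_skip = 0;
 *	crp->crp_desc->crd_len = len;
 *	crp->crp_desc->crd_flags = CRD_F_ENCRYPT;
 *	return (crypto_dispatch(crp));
 *
 * The callback later runs with any error in crp->crp_etype.
 */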
/*
 * Add an asymmetric crypto request to a queue,
 * to be processed by the kernel thread.
 */
int
crypto_kdispatch(struct cryptkop *krp)
{
	int error;

	cryptostats.cs_kops++;

	error = crypto_kinvoke(krp, krp->krp_crid);
	if (error == ERESTART) {
		CRYPTO_Q_LOCK();
		TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
		if (crp_sleep)
			wakeup_one(&crp_q);
		CRYPTO_Q_UNLOCK();
		error = 0;
	}
	return error;
}

/*
 * Verify a driver is suitable for the specified operation.
 */
static __inline int
kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp)
{
	return (cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0;
}

/*
 * Select a driver for an asym operation.  The driver must
 * support the necessary algorithm.  The caller can constrain
 * which device is selected with the flags parameter.  The
 * algorithm we use here is pretty stupid; just use the first
 * driver that supports the algorithms we need. If there are
 * multiple suitable drivers we choose the driver with the
 * fewest active operations.  We prefer hardware-backed
 * drivers to software ones when either may be used.
 */
static struct cryptocap *
crypto_select_kdriver(const struct cryptkop *krp, int flags)
{
	struct cryptocap *cap, *best, *blocked;
	int match, hid;

	CRYPTO_DRIVER_ASSERT();

	/*
	 * Look first for hardware crypto devices if permitted.
	 */
	if (flags & CRYPTOCAP_F_HARDWARE)
		match = CRYPTOCAP_F_HARDWARE;
	else
		match = CRYPTOCAP_F_SOFTWARE;
	best = NULL;
	blocked = NULL;
again:
	for (hid = 0; hid < crypto_drivers_num; hid++) {
		cap = &crypto_drivers[hid];
		/*
		 * If it's not initialized, is in the process of
		 * going away, or is not appropriate (hardware
		 * or software based on match), then skip.
		 */
		if (cap->cc_dev == NULL ||
		    (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
		    (cap->cc_flags & match) == 0)
			continue;

		/* verify all the algorithms are supported. */
		if (kdriver_suitable(cap, krp)) {
			if (best == NULL ||
			    cap->cc_koperations < best->cc_koperations)
				best = cap;
		}
	}
	if (best != NULL)
		return best;
	if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) {
		/* sort of an Algol 68-style for loop */
		match = CRYPTOCAP_F_SOFTWARE;
		goto again;
	}
	return best;
}

/*
 * Dispatch an asymmetric crypto request.
 */
static int
crypto_kinvoke(struct cryptkop *krp, int crid)
{
	struct cryptocap *cap = NULL;
	int error;

	KASSERT(krp != NULL, ("%s: krp == NULL", __func__));
	KASSERT(krp->krp_callback != NULL,
	    ("%s: krp->crp_callback == NULL", __func__));

	CRYPTO_DRIVER_LOCK();
	if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
		cap = crypto_checkdriver(crid);
		if (cap != NULL) {
			/*
			 * Driver present, it must support the necessary
			 * algorithm and, if s/w drivers are excluded,
			 * it must be registered as hardware-backed.
			 */
			if (!kdriver_suitable(cap, krp) ||
			    (!crypto_devallowsoft &&
			     (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0))
				cap = NULL;
		}
	} else {
		/*
		 * No requested driver; select based on crid flags.
		 */
		if (!crypto_devallowsoft)	/* NB: disallow s/w drivers */
			crid &= ~CRYPTOCAP_F_SOFTWARE;
		cap = crypto_select_kdriver(krp, crid);
	}
	if (cap != NULL && !cap->cc_kqblocked) {
		krp->krp_hid = cap - crypto_drivers;
		cap->cc_koperations++;
		CRYPTO_DRIVER_UNLOCK();
		error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0);
		CRYPTO_DRIVER_LOCK();
		if (error == ERESTART) {
			cap->cc_koperations--;
			CRYPTO_DRIVER_UNLOCK();
			return (error);
		}
	} else {
		/*
		 * NB: cap is !NULL if device is blocked; in
		 * that case return ERESTART so the operation
		 * is resubmitted if possible.
		 */
		error = (cap == NULL) ? ENODEV : ERESTART;
	}
	CRYPTO_DRIVER_UNLOCK();

	if (error) {
		krp->krp_status = error;
		crypto_kdone(krp);
	}
	return 0;
}

#ifdef CRYPTO_TIMING
static void
crypto_tstat(struct cryptotstat *ts, struct bintime *bt)
{
	struct bintime now, delta;
	struct timespec t;
	uint64_t u;

	binuptime(&now);
	u = now.frac;
	delta.frac = now.frac - bt->frac;
	delta.sec = now.sec - bt->sec;
	if (u < delta.frac)
		delta.sec--;
	bintime2timespec(&delta, &t);
	timespecadd(&ts->acc, &t);
	if (timespeccmp(&t, &ts->min, <))
		ts->min = t;
	if (timespeccmp(&t, &ts->max, >))
		ts->max = t;
	ts->count++;

	*bt = now;
}
#endif

/*
 * Dispatch a crypto request to the appropriate crypto devices.
 */
static int
crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
{

	KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
	KASSERT(crp->crp_callback != NULL,
	    ("%s: crp->crp_callback == NULL", __func__));
	KASSERT(crp->crp_desc != NULL, ("%s: crp->crp_desc == NULL", __func__));

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
#endif
	if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
		struct cryptodesc *crd;
		u_int64_t nid;

		/*
		 * Driver has unregistered; migrate the session and return
		 * an error to the caller so they'll resubmit the op.
		 *
		 * XXX: What if there are more already queued requests for this
		 *      session?
		 */
		crypto_freesession(crp->crp_sid);

		for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
			crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);

		/* XXX propagate flags from initial session? */
		if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI),
		    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0)
			crp->crp_sid = nid;

		crp->crp_etype = EAGAIN;
		crypto_done(crp);
		return 0;
	} else {
		/*
		 * Invoke the driver to process the request.
		 */
		return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint);
	}
}

/*
 * Release a set of crypto descriptors.
 */
void
crypto_freereq(struct cryptop *crp)
{
	struct cryptodesc *crd;

	if (crp == NULL)
		return;

#ifdef DIAGNOSTIC
	{
		struct cryptop *crp2;

		CRYPTO_Q_LOCK();
		TAILQ_FOREACH(crp2, &crp_q, crp_next) {
			KASSERT(crp2 != crp,
			    ("Freeing cryptop from the crypto queue (%p).",
			    crp));
		}
		CRYPTO_Q_UNLOCK();
		CRYPTO_RETQ_LOCK();
		TAILQ_FOREACH(crp2, &crp_ret_q, crp_next) {
			KASSERT(crp2 != crp,
			    ("Freeing cryptop from the return queue (%p).",
			    crp));
		}
		CRYPTO_RETQ_UNLOCK();
	}
#endif

	while ((crd = crp->crp_desc) != NULL) {
		crp->crp_desc = crd->crd_next;
		uma_zfree(cryptodesc_zone, crd);
	}
	uma_zfree(cryptop_zone, crp);
}

/*
 * Acquire a set of crypto descriptors.
 */
struct cryptop *
crypto_getreq(int num)
{
	struct cryptodesc *crd;
	struct cryptop *crp;

	crp = uma_zalloc(cryptop_zone, M_NOWAIT|M_ZERO);
	if (crp != NULL) {
		while (num--) {
			crd = uma_zalloc(cryptodesc_zone, M_NOWAIT|M_ZERO);
			if (crd == NULL) {
				crypto_freereq(crp);
				return NULL;
			}

			crd->crd_next = crp->crp_desc;
			crp->crp_desc = crd;
		}
	}
	return crp;
}

/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_done(struct cryptop *crp)
{
	KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0,
		("crypto_done: op already done, flags 0x%x", crp->crp_flags));
	crp->crp_flags |= CRYPTO_F_DONE;
	if (crp->crp_etype != 0)
		cryptostats.cs_errs++;
#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
#endif
	/*
	 * CBIMM means unconditionally do the callback immediately;
	 * CBIFSYNC means do the callback immediately only if the
	 * operation was done synchronously.  Both are used to avoid
	 * doing extraneous context switches; the latter is mostly
	 * used with the software crypto driver.
	 */
	if ((crp->crp_flags & CRYPTO_F_CBIMM) ||
	    ((crp->crp_flags & CRYPTO_F_CBIFSYNC) &&
	     (CRYPTO_SESID2CAPS(crp->crp_sid) & CRYPTOCAP_F_SYNC))) {
		/*
		 * Do the callback directly.  This is ok when the
		 * callback routine does very little (e.g. the
		 * /dev/crypto callback method just does a wakeup).
		 */
#ifdef CRYPTO_TIMING
		if (crypto_timing) {
			/*
			 * NB: We must copy the timestamp before
			 * doing the callback as the cryptop is
			 * likely to be reclaimed.
			 */
			struct bintime t = crp->crp_tstamp;
			crypto_tstat(&cryptostats.cs_cb, &t);
			crp->crp_callback(crp);
			crypto_tstat(&cryptostats.cs_finis, &t);
		} else
#endif
			crp->crp_callback(crp);
	} else {
		/*
		 * Normal case; queue the callback for the thread.
		 */
		CRYPTO_RETQ_LOCK();
		if (CRYPTO_RETQ_EMPTY())
			wakeup_one(&crp_ret_q);	/* shared wait channel */
		TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
		CRYPTO_RETQ_UNLOCK();
	}
}

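/*
 * Illustrative sketch: a minimal consumer callback matching the
 * dispatch example earlier.  Note the EAGAIN convention used by
 * crypto_invoke() when a session has been migrated:
 *
 *	static int
 *	example_cb(struct cryptop *crp)
 *	{
 *		if (crp->crp_etype == EAGAIN) {
 *			crp->crp_etype = 0;
 *			return (crypto_dispatch(crp));
 *		}
 *		(consume the result, then crypto_freereq(crp))
 *		return (0);
 *	}
 */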
1152eb73a605SSam Leffler */ 1153091d81d1SSam Leffler CRYPTO_RETQ_LOCK(); 11549c12ca29SPawel Jakub Dawidek if (CRYPTO_RETQ_EMPTY()) 11551a91ccccSSam Leffler wakeup_one(&crp_ret_q); /* shared wait channel */ 11564acae0acSPawel Jakub Dawidek TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next); 11571a91ccccSSam Leffler CRYPTO_RETQ_UNLOCK(); 1158091d81d1SSam Leffler } 1159eb73a605SSam Leffler } 1160091d81d1SSam Leffler 1161091d81d1SSam Leffler /* 1162091d81d1SSam Leffler * Invoke the callback on behalf of the driver. 1163091d81d1SSam Leffler */ 1164091d81d1SSam Leffler void 1165091d81d1SSam Leffler crypto_kdone(struct cryptkop *krp) 1166091d81d1SSam Leffler { 11674acae0acSPawel Jakub Dawidek struct cryptocap *cap; 1168091d81d1SSam Leffler 11697d1853eeSSam Leffler if (krp->krp_status != 0) 11707d1853eeSSam Leffler cryptostats.cs_kerrs++; 11714acae0acSPawel Jakub Dawidek CRYPTO_DRIVER_LOCK(); 11724acae0acSPawel Jakub Dawidek /* XXX: What if driver is loaded in the meantime? */ 11734acae0acSPawel Jakub Dawidek if (krp->krp_hid < crypto_drivers_num) { 11744acae0acSPawel Jakub Dawidek cap = &crypto_drivers[krp->krp_hid]; 11754acae0acSPawel Jakub Dawidek cap->cc_koperations--; 11764acae0acSPawel Jakub Dawidek KASSERT(cap->cc_koperations >= 0, ("cc_koperations < 0")); 11774acae0acSPawel Jakub Dawidek if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) 11784acae0acSPawel Jakub Dawidek crypto_remove(cap); 11794acae0acSPawel Jakub Dawidek } 11804acae0acSPawel Jakub Dawidek CRYPTO_DRIVER_UNLOCK(); 1181091d81d1SSam Leffler CRYPTO_RETQ_LOCK(); 11829c12ca29SPawel Jakub Dawidek if (CRYPTO_RETQ_EMPTY()) 11831a91ccccSSam Leffler wakeup_one(&crp_ret_q); /* shared wait channel */ 11844acae0acSPawel Jakub Dawidek TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next); 11851a91ccccSSam Leffler CRYPTO_RETQ_UNLOCK(); 1186091d81d1SSam Leffler } 1187091d81d1SSam Leffler 1188091d81d1SSam Leffler int 1189091d81d1SSam Leffler crypto_getfeat(int *featp) 1190091d81d1SSam Leffler { 1191091d81d1SSam Leffler int hid, kalg, feat = 0; 1192091d81d1SSam Leffler 1193091d81d1SSam Leffler CRYPTO_DRIVER_LOCK(); 1194091d81d1SSam Leffler for (hid = 0; hid < crypto_drivers_num; hid++) { 11956810ad6fSSam Leffler const struct cryptocap *cap = &crypto_drivers[hid]; 11966810ad6fSSam Leffler 11976810ad6fSSam Leffler if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) && 1198091d81d1SSam Leffler !crypto_devallowsoft) { 1199091d81d1SSam Leffler continue; 1200091d81d1SSam Leffler } 1201091d81d1SSam Leffler for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++) 12026810ad6fSSam Leffler if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED) 1203091d81d1SSam Leffler feat |= 1 << kalg; 1204091d81d1SSam Leffler } 1205091d81d1SSam Leffler CRYPTO_DRIVER_UNLOCK(); 1206091d81d1SSam Leffler *featp = feat; 1207091d81d1SSam Leffler return (0); 1208091d81d1SSam Leffler } 1209091d81d1SSam Leffler 121051e45326SSam Leffler /* 121151e45326SSam Leffler * Terminate a thread at module unload. The process that 121251e45326SSam Leffler * initiated this is waiting for us to signal that we're gone; 121351e45326SSam Leffler * wake it up and exit. We use the driver table lock to insure 121451e45326SSam Leffler * we don't do the wakeup before they're waiting. There is no 121551e45326SSam Leffler * race here because the waiter sleeps on the proc lock for the 121651e45326SSam Leffler * thread so it gets notified at the right time because of an 121751e45326SSam Leffler * extra wakeup that's done in exit1(). 
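 * Both the dispatch thread (crypto_proc) and the return thread
 * (crypto_ret_proc) exit through here once their proc pointer has
 * been cleared at module unload.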
121851e45326SSam Leffler */
1219091d81d1SSam Leffler static void
122051e45326SSam Leffler crypto_finis(void *chan)
1221091d81d1SSam Leffler {
122251e45326SSam Leffler CRYPTO_DRIVER_LOCK();
122351e45326SSam Leffler wakeup_one(chan);
122451e45326SSam Leffler CRYPTO_DRIVER_UNLOCK();
122551e45326SSam Leffler kthread_exit(0);
1226091d81d1SSam Leffler }
1227091d81d1SSam Leffler
1228091d81d1SSam Leffler /*
12291a91ccccSSam Leffler * Crypto thread, dispatches crypto requests.
1230091d81d1SSam Leffler */
1231091d81d1SSam Leffler static void
1232091d81d1SSam Leffler crypto_proc(void)
1233091d81d1SSam Leffler {
12341a91ccccSSam Leffler struct cryptop *crp, *submit;
12351a91ccccSSam Leffler struct cryptkop *krp;
1236091d81d1SSam Leffler struct cryptocap *cap;
12374acae0acSPawel Jakub Dawidek u_int32_t hid;
1238091d81d1SSam Leffler int result, hint;
1239091d81d1SSam Leffler
12401a91ccccSSam Leffler CRYPTO_Q_LOCK();
1241091d81d1SSam Leffler for (;;) {
1242091d81d1SSam Leffler /*
1243091d81d1SSam Leffler * Find the first element in the queue that can be
1244091d81d1SSam Leffler * processed and look-ahead to see if multiple ops
1245091d81d1SSam Leffler * are ready for the same driver.
1246091d81d1SSam Leffler */
1247091d81d1SSam Leffler submit = NULL;
1248091d81d1SSam Leffler hint = 0;
1249091d81d1SSam Leffler TAILQ_FOREACH(crp, &crp_q, crp_next) {
12504acae0acSPawel Jakub Dawidek hid = CRYPTO_SESID2HID(crp->crp_sid);
1251091d81d1SSam Leffler cap = crypto_checkdriver(hid);
12524acae0acSPawel Jakub Dawidek /*
12534acae0acSPawel Jakub Dawidek * The driver cannot disappear while there is an active
12544acae0acSPawel Jakub Dawidek * session.
12554acae0acSPawel Jakub Dawidek */
1256c3c82036SPawel Jakub Dawidek KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
1257c3c82036SPawel Jakub Dawidek __func__, __LINE__));
12586810ad6fSSam Leffler if (cap == NULL || cap->cc_dev == NULL) {
1259091d81d1SSam Leffler /* Op needs to be migrated, process it. */
1260091d81d1SSam Leffler if (submit == NULL)
1261091d81d1SSam Leffler submit = crp;
1262091d81d1SSam Leffler break;
1263091d81d1SSam Leffler }
1264091d81d1SSam Leffler if (!cap->cc_qblocked) {
1265091d81d1SSam Leffler if (submit != NULL) {
1266091d81d1SSam Leffler /*
1267091d81d1SSam Leffler * We stop on finding another op,
1268091d81d1SSam Leffler * regardless of whether it is for the same
1269091d81d1SSam Leffler * driver or not. We could keep
1270091d81d1SSam Leffler * searching the queue but it might be
1271091d81d1SSam Leffler * better to just use a per-driver
1272091d81d1SSam Leffler * queue instead.
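 * When the look-ahead does find a second request for the same
 * driver, CRYPTO_HINT_MORE is passed to crypto_invoke() below so
 * the driver knows more work is already queued for it.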
1273091d81d1SSam Leffler */
127407d0c94aSSam Leffler if (CRYPTO_SESID2HID(submit->crp_sid) == hid)
1275091d81d1SSam Leffler hint = CRYPTO_HINT_MORE;
1276091d81d1SSam Leffler break;
1277091d81d1SSam Leffler } else {
1278091d81d1SSam Leffler submit = crp;
1279eb73a605SSam Leffler if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
1280091d81d1SSam Leffler break;
1281091d81d1SSam Leffler /* keep scanning in case more are queued */
1282091d81d1SSam Leffler }
1283091d81d1SSam Leffler }
1284091d81d1SSam Leffler }
1285091d81d1SSam Leffler if (submit != NULL) {
1286091d81d1SSam Leffler TAILQ_REMOVE(&crp_q, submit, crp_next);
12874acae0acSPawel Jakub Dawidek hid = CRYPTO_SESID2HID(submit->crp_sid);
12884acae0acSPawel Jakub Dawidek cap = crypto_checkdriver(hid);
1289c3c82036SPawel Jakub Dawidek KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
1290c3c82036SPawel Jakub Dawidek __func__, __LINE__));
12914acae0acSPawel Jakub Dawidek result = crypto_invoke(cap, submit, hint);
1292091d81d1SSam Leffler if (result == ERESTART) {
1293091d81d1SSam Leffler /*
1294091d81d1SSam Leffler * The driver ran out of resources, mark the
1295091d81d1SSam Leffler * driver ``blocked'' for cryptops and put
1296091d81d1SSam Leffler * the request back in the queue. It would be
1297091d81d1SSam Leffler * best to put the request back where we got
1298091d81d1SSam Leffler * it, but that's hard, so for now we put it
1299091d81d1SSam Leffler * at the front. This should be ok; putting
1300091d81d1SSam Leffler * it at the end does not work.
1301091d81d1SSam Leffler */
1302091d81d1SSam Leffler /* XXX validate sid again? */
130307d0c94aSSam Leffler crypto_drivers[CRYPTO_SESID2HID(submit->crp_sid)].cc_qblocked = 1;
1304091d81d1SSam Leffler TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
13057d1853eeSSam Leffler cryptostats.cs_blocks++;
1306091d81d1SSam Leffler }
1307091d81d1SSam Leffler }
1308091d81d1SSam Leffler
1309091d81d1SSam Leffler /* As above, but for key ops */
1310091d81d1SSam Leffler TAILQ_FOREACH(krp, &crp_kq, krp_next) {
1311091d81d1SSam Leffler cap = crypto_checkdriver(krp->krp_hid);
13126810ad6fSSam Leffler if (cap == NULL || cap->cc_dev == NULL) {
13136810ad6fSSam Leffler /*
13146810ad6fSSam Leffler * Operation needs to be migrated, invalidate
13156810ad6fSSam Leffler * the assigned device so it will reselect a
13166810ad6fSSam Leffler * new one below. Propagate the original
13176810ad6fSSam Leffler * crid selection flags if supplied.
13186810ad6fSSam Leffler */
13196810ad6fSSam Leffler krp->krp_hid = krp->krp_crid &
13206810ad6fSSam Leffler (CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE);
13216810ad6fSSam Leffler if (krp->krp_hid == 0)
13226810ad6fSSam Leffler krp->krp_hid =
13236810ad6fSSam Leffler CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE;
1324091d81d1SSam Leffler break;
1325091d81d1SSam Leffler }
1326091d81d1SSam Leffler if (!cap->cc_kqblocked)
1327091d81d1SSam Leffler break;
1328091d81d1SSam Leffler }
1329091d81d1SSam Leffler if (krp != NULL) {
1330091d81d1SSam Leffler TAILQ_REMOVE(&crp_kq, krp, krp_next);
13316810ad6fSSam Leffler result = crypto_kinvoke(krp, krp->krp_hid);
1332091d81d1SSam Leffler if (result == ERESTART) {
1333091d81d1SSam Leffler /*
1334091d81d1SSam Leffler * The driver ran out of resources, mark the
1335091d81d1SSam Leffler * driver ``blocked'' for cryptkops and put
1336091d81d1SSam Leffler * the request back in the queue. It would be
1337091d81d1SSam Leffler * best to put the request back where we got
1338091d81d1SSam Leffler * it, but that's hard, so for now we put it
1339091d81d1SSam Leffler * at the front.
This should be ok; putting 1340091d81d1SSam Leffler * it at the end does not work. 1341091d81d1SSam Leffler */ 1342091d81d1SSam Leffler /* XXX validate sid again? */ 1343091d81d1SSam Leffler crypto_drivers[krp->krp_hid].cc_kqblocked = 1; 1344091d81d1SSam Leffler TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next); 13457d1853eeSSam Leffler cryptostats.cs_kblocks++; 1346091d81d1SSam Leffler } 1347091d81d1SSam Leffler } 1348091d81d1SSam Leffler 13491a91ccccSSam Leffler if (submit == NULL && krp == NULL) { 1350091d81d1SSam Leffler /* 1351091d81d1SSam Leffler * Nothing more to be processed. Sleep until we're 1352091d81d1SSam Leffler * woken because there are more ops to process. 1353091d81d1SSam Leffler * This happens either by submission or by a driver 1354091d81d1SSam Leffler * becoming unblocked and notifying us through 1355091d81d1SSam Leffler * crypto_unblock. Note that when we wakeup we 1356091d81d1SSam Leffler * start processing each queue again from the 1357091d81d1SSam Leffler * front. It's not clear that it's important to 1358091d81d1SSam Leffler * preserve this ordering since ops may finish 1359091d81d1SSam Leffler * out of order if dispatched to different devices 1360091d81d1SSam Leffler * and some become blocked while others do not. 1361091d81d1SSam Leffler */ 13623a865c82SPawel Jakub Dawidek crp_sleep = 1; 13631a91ccccSSam Leffler msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0); 13643a865c82SPawel Jakub Dawidek crp_sleep = 0; 136551e45326SSam Leffler if (cryptoproc == NULL) 136651e45326SSam Leffler break; 13677d1853eeSSam Leffler cryptostats.cs_intrs++; 1368091d81d1SSam Leffler } 1369091d81d1SSam Leffler } 137051e45326SSam Leffler CRYPTO_Q_UNLOCK(); 13711a91ccccSSam Leffler 137251e45326SSam Leffler crypto_finis(&crp_q); 13731a91ccccSSam Leffler } 13741a91ccccSSam Leffler 13751a91ccccSSam Leffler /* 13761a91ccccSSam Leffler * Crypto returns thread, does callbacks for processed crypto requests. 13771a91ccccSSam Leffler * Callbacks are done here, rather than in the crypto drivers, because 13781a91ccccSSam Leffler * callbacks typically are expensive and would slow interrupt handling. 13791a91ccccSSam Leffler */ 13801a91ccccSSam Leffler static void 13811a91ccccSSam Leffler crypto_ret_proc(void) 13821a91ccccSSam Leffler { 13831a91ccccSSam Leffler struct cryptop *crpt; 13841a91ccccSSam Leffler struct cryptkop *krpt; 13851a91ccccSSam Leffler 13861a91ccccSSam Leffler CRYPTO_RETQ_LOCK(); 13871a91ccccSSam Leffler for (;;) { 13881a91ccccSSam Leffler /* Harvest return q's for completed ops */ 13891a91ccccSSam Leffler crpt = TAILQ_FIRST(&crp_ret_q); 13901a91ccccSSam Leffler if (crpt != NULL) 13911a91ccccSSam Leffler TAILQ_REMOVE(&crp_ret_q, crpt, crp_next); 13921a91ccccSSam Leffler 13931a91ccccSSam Leffler krpt = TAILQ_FIRST(&crp_ret_kq); 13941a91ccccSSam Leffler if (krpt != NULL) 13951a91ccccSSam Leffler TAILQ_REMOVE(&crp_ret_kq, krpt, krp_next); 13961a91ccccSSam Leffler 13971a91ccccSSam Leffler if (crpt != NULL || krpt != NULL) { 13981a91ccccSSam Leffler CRYPTO_RETQ_UNLOCK(); 13991a91ccccSSam Leffler /* 14001a91ccccSSam Leffler * Run callbacks unlocked. 14011a91ccccSSam Leffler */ 14027d1853eeSSam Leffler if (crpt != NULL) { 14037d1853eeSSam Leffler #ifdef CRYPTO_TIMING 14047d1853eeSSam Leffler if (crypto_timing) { 14057d1853eeSSam Leffler /* 14067d1853eeSSam Leffler * NB: We must copy the timestamp before 14077d1853eeSSam Leffler * doing the callback as the cryptop is 14087d1853eeSSam Leffler * likely to be reclaimed. 
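 * (the callback's owner normally releases it with crypto_freereq()).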
14097d1853eeSSam Leffler */ 14107d1853eeSSam Leffler struct bintime t = crpt->crp_tstamp; 14117d1853eeSSam Leffler crypto_tstat(&cryptostats.cs_cb, &t); 14121a91ccccSSam Leffler crpt->crp_callback(crpt); 14137d1853eeSSam Leffler crypto_tstat(&cryptostats.cs_finis, &t); 14147d1853eeSSam Leffler } else 14157d1853eeSSam Leffler #endif 14167d1853eeSSam Leffler crpt->crp_callback(crpt); 14177d1853eeSSam Leffler } 14181a91ccccSSam Leffler if (krpt != NULL) 14191a91ccccSSam Leffler krpt->krp_callback(krpt); 14201a91ccccSSam Leffler CRYPTO_RETQ_LOCK(); 14211a91ccccSSam Leffler } else { 14221a91ccccSSam Leffler /* 14231a91ccccSSam Leffler * Nothing more to be processed. Sleep until we're 14241a91ccccSSam Leffler * woken because there are more returns to process. 14251a91ccccSSam Leffler */ 14261a91ccccSSam Leffler msleep(&crp_ret_q, &crypto_ret_q_mtx, PWAIT, 14271a91ccccSSam Leffler "crypto_ret_wait", 0); 142851e45326SSam Leffler if (cryptoretproc == NULL) 142951e45326SSam Leffler break; 14307d1853eeSSam Leffler cryptostats.cs_rets++; 14311a91ccccSSam Leffler } 14321a91ccccSSam Leffler } 143351e45326SSam Leffler CRYPTO_RETQ_UNLOCK(); 143451e45326SSam Leffler 143551e45326SSam Leffler crypto_finis(&crp_ret_q); 14361a91ccccSSam Leffler } 14376810ad6fSSam Leffler 14386810ad6fSSam Leffler #ifdef DDB 14396810ad6fSSam Leffler static void 14406810ad6fSSam Leffler db_show_drivers(void) 14416810ad6fSSam Leffler { 14426810ad6fSSam Leffler int hid; 14436810ad6fSSam Leffler 14446810ad6fSSam Leffler db_printf("%12s %4s %4s %8s %2s %2s\n" 14456810ad6fSSam Leffler , "Device" 14466810ad6fSSam Leffler , "Ses" 14476810ad6fSSam Leffler , "Kops" 14486810ad6fSSam Leffler , "Flags" 14496810ad6fSSam Leffler , "QB" 14506810ad6fSSam Leffler , "KB" 14516810ad6fSSam Leffler ); 14526810ad6fSSam Leffler for (hid = 0; hid < crypto_drivers_num; hid++) { 14536810ad6fSSam Leffler const struct cryptocap *cap = &crypto_drivers[hid]; 14546810ad6fSSam Leffler if (cap->cc_dev == NULL) 14556810ad6fSSam Leffler continue; 14566810ad6fSSam Leffler db_printf("%-12s %4u %4u %08x %2u %2u\n" 14576810ad6fSSam Leffler , device_get_nameunit(cap->cc_dev) 14586810ad6fSSam Leffler , cap->cc_sessions 14596810ad6fSSam Leffler , cap->cc_koperations 14606810ad6fSSam Leffler , cap->cc_flags 14616810ad6fSSam Leffler , cap->cc_qblocked 14626810ad6fSSam Leffler , cap->cc_kqblocked 14636810ad6fSSam Leffler ); 14646810ad6fSSam Leffler } 14656810ad6fSSam Leffler } 14666810ad6fSSam Leffler 14676810ad6fSSam Leffler DB_SHOW_COMMAND(crypto, db_show_crypto) 14686810ad6fSSam Leffler { 14696810ad6fSSam Leffler struct cryptop *crp; 14706810ad6fSSam Leffler 14716810ad6fSSam Leffler db_show_drivers(); 14726810ad6fSSam Leffler db_printf("\n"); 14736810ad6fSSam Leffler 14746810ad6fSSam Leffler db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n", 14756810ad6fSSam Leffler "HID", "Caps", "Ilen", "Olen", "Etype", "Flags", 14766810ad6fSSam Leffler "Desc", "Callback"); 14776810ad6fSSam Leffler TAILQ_FOREACH(crp, &crp_q, crp_next) { 14786810ad6fSSam Leffler db_printf("%4u %08x %4u %4u %4u %04x %8p %8p\n" 14796810ad6fSSam Leffler , (int) CRYPTO_SESID2HID(crp->crp_sid) 14806810ad6fSSam Leffler , (int) CRYPTO_SESID2CAPS(crp->crp_sid) 14816810ad6fSSam Leffler , crp->crp_ilen, crp->crp_olen 14826810ad6fSSam Leffler , crp->crp_etype 14836810ad6fSSam Leffler , crp->crp_flags 14846810ad6fSSam Leffler , crp->crp_desc 14856810ad6fSSam Leffler , crp->crp_callback 14866810ad6fSSam Leffler ); 14876810ad6fSSam Leffler } 14886810ad6fSSam Leffler if (!TAILQ_EMPTY(&crp_ret_q)) { 14896810ad6fSSam 
Leffler db_printf("\n%4s %4s %4s %8s\n", 14906810ad6fSSam Leffler "HID", "Etype", "Flags", "Callback"); 14916810ad6fSSam Leffler TAILQ_FOREACH(crp, &crp_ret_q, crp_next) { 14926810ad6fSSam Leffler db_printf("%4u %4u %04x %8p\n" 14936810ad6fSSam Leffler , (int) CRYPTO_SESID2HID(crp->crp_sid) 14946810ad6fSSam Leffler , crp->crp_etype 14956810ad6fSSam Leffler , crp->crp_flags 14966810ad6fSSam Leffler , crp->crp_callback 14976810ad6fSSam Leffler ); 14986810ad6fSSam Leffler } 14996810ad6fSSam Leffler } 15006810ad6fSSam Leffler } 15016810ad6fSSam Leffler 15026810ad6fSSam Leffler DB_SHOW_COMMAND(kcrypto, db_show_kcrypto) 15036810ad6fSSam Leffler { 15046810ad6fSSam Leffler struct cryptkop *krp; 15056810ad6fSSam Leffler 15066810ad6fSSam Leffler db_show_drivers(); 15076810ad6fSSam Leffler db_printf("\n"); 15086810ad6fSSam Leffler 15096810ad6fSSam Leffler db_printf("%4s %5s %4s %4s %8s %4s %8s\n", 15106810ad6fSSam Leffler "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback"); 15116810ad6fSSam Leffler TAILQ_FOREACH(krp, &crp_kq, krp_next) { 15126810ad6fSSam Leffler db_printf("%4u %5u %4u %4u %08x %4u %8p\n" 15136810ad6fSSam Leffler , krp->krp_op 15146810ad6fSSam Leffler , krp->krp_status 15156810ad6fSSam Leffler , krp->krp_iparams, krp->krp_oparams 15166810ad6fSSam Leffler , krp->krp_crid, krp->krp_hid 15176810ad6fSSam Leffler , krp->krp_callback 15186810ad6fSSam Leffler ); 15196810ad6fSSam Leffler } 15206810ad6fSSam Leffler if (!TAILQ_EMPTY(&crp_ret_q)) { 15216810ad6fSSam Leffler db_printf("%4s %5s %8s %4s %8s\n", 15226810ad6fSSam Leffler "Op", "Status", "CRID", "HID", "Callback"); 15236810ad6fSSam Leffler TAILQ_FOREACH(krp, &crp_ret_kq, krp_next) { 15246810ad6fSSam Leffler db_printf("%4u %5u %08x %4u %8p\n" 15256810ad6fSSam Leffler , krp->krp_op 15266810ad6fSSam Leffler , krp->krp_status 15276810ad6fSSam Leffler , krp->krp_crid, krp->krp_hid 15286810ad6fSSam Leffler , krp->krp_callback 15296810ad6fSSam Leffler ); 15306810ad6fSSam Leffler } 15316810ad6fSSam Leffler } 15326810ad6fSSam Leffler } 15336810ad6fSSam Leffler #endif 15346810ad6fSSam Leffler 15356810ad6fSSam Leffler int crypto_modevent(module_t mod, int type, void *unused); 15366810ad6fSSam Leffler 15376810ad6fSSam Leffler /* 15386810ad6fSSam Leffler * Initialization code, both for static and dynamic loading. 15396810ad6fSSam Leffler * Note this is not invoked with the usual MODULE_DECLARE 15406810ad6fSSam Leffler * mechanism but instead is listed as a dependency by the 15416810ad6fSSam Leffler * cryptosoft driver. This guarantees proper ordering of 15426810ad6fSSam Leffler * calls on module load/unload. 15436810ad6fSSam Leffler */ 15446810ad6fSSam Leffler int 15456810ad6fSSam Leffler crypto_modevent(module_t mod, int type, void *unused) 15466810ad6fSSam Leffler { 15476810ad6fSSam Leffler int error = EINVAL; 15486810ad6fSSam Leffler 15496810ad6fSSam Leffler switch (type) { 15506810ad6fSSam Leffler case MOD_LOAD: 15516810ad6fSSam Leffler error = crypto_init(); 15526810ad6fSSam Leffler if (error == 0 && bootverbose) 15536810ad6fSSam Leffler printf("crypto: <crypto core>\n"); 15546810ad6fSSam Leffler break; 15556810ad6fSSam Leffler case MOD_UNLOAD: 15566810ad6fSSam Leffler /*XXX disallow if active sessions */ 15576810ad6fSSam Leffler error = 0; 15586810ad6fSSam Leffler crypto_destroy(); 15596810ad6fSSam Leffler return 0; 15606810ad6fSSam Leffler } 15616810ad6fSSam Leffler return error; 15626810ad6fSSam Leffler } 15636810ad6fSSam Leffler MODULE_VERSION(crypto, 1); 15646810ad6fSSam Leffler MODULE_DEPEND(crypto, zlib, 1, 1, 1); 1565
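
/*
 * Purely illustrative sketch of consumer usage, tying together the
 * interfaces above (session setup, request construction, dispatch and
 * completion).  The names example_cb/example_hmac are hypothetical,
 * error handling is abbreviated, <sys/mbuf.h> would be needed for the
 * mbuf type, and the chain is assumed to have room for the MAC at
 * offset 'len'.  It is kept under #if 0 so it is never compiled.
 */
#if 0
static int
example_cb(struct cryptop *crp)
{

	if (crp->crp_etype == EAGAIN) {
		/*
		 * The session was migrated (see the EAGAIN handling in
		 * crypto_invoke above); clear the error and resubmit.
		 */
		crp->crp_etype = 0;
		return (crypto_dispatch(crp));
	}
	/* ... hand the MAC written at crd_inject back to the owner ... */
	crypto_freereq(crp);
	return (0);
}

static int
example_hmac(struct mbuf *m, int len, caddr_t key, int klen_bits)
{
	struct cryptoini cri;
	struct cryptodesc *crd;
	struct cryptop *crp;
	u_int64_t sid;
	int error;

	/* One-time session setup; prefer hardware, fall back to software. */
	bzero(&cri, sizeof(cri));
	cri.cri_alg = CRYPTO_SHA1_HMAC;
	cri.cri_key = key;
	cri.cri_klen = klen_bits;
	error = crypto_newsession(&sid, &cri,
	    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
	if (error)
		return (error);

	crp = crypto_getreq(1);			/* one descriptor */
	if (crp == NULL) {
		crypto_freesession(sid);
		return (ENOMEM);
	}
	crd = crp->crp_desc;
	crd->crd_alg = CRYPTO_SHA1_HMAC;
	crd->crd_skip = 0;			/* MAC over the whole chain */
	crd->crd_len = len;
	crd->crd_inject = len;			/* where the MAC is written */

	crp->crp_sid = sid;
	crp->crp_ilen = len;
	crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC;
	crp->crp_buf = (caddr_t)m;
	crp->crp_callback = example_cb;

	return (crypto_dispatch(crp));
}
#endif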