/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2003 Sam Leffler, Errno Consulting
 * Copyright (c) 2003 Global Technology Associates, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _SAFE_SAFEVAR_H_
#define	_SAFE_SAFEVAR_H_

/* Maximum queue length */
#ifndef SAFE_MAX_NQUEUE
#define	SAFE_MAX_NQUEUE	60
#endif

#define	SAFE_MAX_PART		64	/* Maximum scatter/gather depth */
#define	SAFE_DMA_BOUNDARY	0	/* No boundary for source DMA ops */
#define	SAFE_MAX_DSIZE		MCLBYTES /* Fixed scatter particle size */
#define	SAFE_MAX_SSIZE		0x0ffff	/* Maximum gather particle size */
#define	SAFE_MAX_DMA		0xfffff	/* Maximum PE operand size (20 bits) */
/* total src+dst particle descriptors */
#define	SAFE_TOTAL_DPART	(SAFE_MAX_NQUEUE * SAFE_MAX_PART)
#define	SAFE_TOTAL_SPART	(SAFE_MAX_NQUEUE * SAFE_MAX_PART)

#define	SAFE_RNG_MAXBUFSIZ	128	/* 32-bit words */

#define	SAFE_CARD(sid)		(((sid) & 0xf0000000) >> 28)
#define	SAFE_SESSION(sid)	( (sid) & 0x0fffffff)
#define	SAFE_SID(crd, sesn)	(((crd) << 28) | ((sesn) & 0x0fffffff))
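
/*
 * Example of how the macros above pack and unpack a session id: the
 * card (crypto device) index occupies the top 4 bits and the per-card
 * session number the low 28 bits, so
 *
 *	SAFE_SID(2, 5)           == 0x20000005
 *	SAFE_CARD(0x20000005)    == 2
 *	SAFE_SESSION(0x20000005) == 5
 *
 * i.e. SAFE_CARD() and SAFE_SESSION() simply invert SAFE_SID().
 */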

#define	SAFE_DEF_RTY		0xff	/* PCI Retry Timeout */
#define	SAFE_DEF_TOUT		0xff	/* PCI TRDY Timeout */
#define	SAFE_DEF_CACHELINE	0x01	/* Cache Line setting */

#ifdef _KERNEL
/*
 * State associated with the allocation of each chunk
 * of memory setup for DMA.
 */
struct safe_dma_alloc {
	u_int32_t		dma_paddr;	/* physical address */
	caddr_t			dma_vaddr;	/* virtual address */
	bus_dma_tag_t		dma_tag;	/* bus dma tag used */
	bus_dmamap_t		dma_map;	/* associated map */
	bus_dma_segment_t	dma_seg;
	bus_size_t		dma_size;	/* mapped memory size (bytes) */
	int			dma_nseg;	/* number of segments */
};

/*
 * Cryptographic operand state.  One of these exists for each
 * source and destination operand passed in from the crypto
 * subsystem.  When possible, source and destination operands
 * refer to the same memory; more often they are distinct.
 * We track the virtual address of each operand as well as
 * where each is mapped for DMA.
 */
struct safe_operand {
	union {
		struct mbuf *m;
		struct uio *io;
	} u;
	bus_dmamap_t		map;
	bus_size_t		mapsize;
	int			nsegs;
	bus_dma_segment_t	segs[SAFE_MAX_PART];
};

/*
 * Packet engine ring entry and cryptographic operation state.
 * The packet engine requires a ring of descriptors that contain
 * pointers to various cryptographic state.  However the ring
 * configuration register allows you to specify an arbitrary size
 * for ring entries.  We use this feature to collect most of the
 * state for each cryptographic request into one spot.  Other than
 * ring entries, only the ``particle descriptors'' (scatter/gather
 * lists) and the actual operand data are kept separate.  The
 * particle descriptors must also be organized in rings.  The
 * operand data can be located arbitrarily (modulo alignment constraints).
 *
 * Note that the descriptor ring is mapped onto the PCI bus so
 * the hardware can DMA data.  This means the entire ring must be
 * contiguous.
 */
struct safe_ringentry {
	struct safe_desc	re_desc;	/* command descriptor */
	struct safe_sarec	re_sa;		/* SA record */
	struct safe_sastate	re_sastate;	/* SA state record */
	struct cryptop		*re_crp;	/* crypto operation */

	struct safe_operand	re_src;		/* source operand */
	struct safe_operand	re_dst;		/* destination operand */

	int			re_sesn;	/* crypto session ID */
	int			re_flags;
#define	SAFE_QFLAGS_COPYOUTIV	0x1		/* copy back on completion */
#define	SAFE_QFLAGS_COPYOUTICV	0x2		/* copy back on completion */
};

#define	re_src_m	re_src.u.m
#define	re_src_io	re_src.u.io
#define	re_src_map	re_src.map
#define	re_src_nsegs	re_src.nsegs
#define	re_src_segs	re_src.segs
#define	re_src_mapsize	re_src.mapsize

#define	re_dst_m	re_dst.u.m
#define	re_dst_io	re_dst.u.io
#define	re_dst_map	re_dst.map
#define	re_dst_nsegs	re_dst.nsegs
#define	re_dst_segs	re_dst.segs
#define	re_dst_mapsize	re_dst.mapsize
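
/*
 * Hypothetical helper, shown only to illustrate how a PE ring cursor
 * is meant to move: the ring is one contiguous, DMA-mapped array of
 * entries (the SAFE_TOTAL_*PART sizing above suggests SAFE_MAX_NQUEUE
 * of them), so advancing a cursor is an increment plus a wrap back to
 * the ring base.  The driver is assumed to open-code this pattern
 * around its sc_front/sc_back and particle-ring pointers; this
 * function is not part of its interface.
 */
static __inline struct safe_ringentry *
safe_ring_next(struct safe_ringentry *re, struct safe_ringentry *ring,
	struct safe_ringentry *ringtop)	/* ringtop: one past the last entry */
{
	return (++re == ringtop ? ring : re);
}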

struct rndtest_state;

struct safe_session {
	u_int32_t	ses_used;
	u_int32_t	ses_klen;		/* key length in bits */
	u_int32_t	ses_key[8];		/* DES/3DES/AES key */
	u_int32_t	ses_mlen;		/* hmac length in bytes */
	u_int32_t	ses_hminner[5];		/* hmac inner state */
	u_int32_t	ses_hmouter[5];		/* hmac outer state */
	u_int32_t	ses_iv[4];		/* DES/3DES/AES iv */
};

struct safe_softc {
	device_t		sc_dev;		/* device backpointer */
	struct resource		*sc_irq;
	void			*sc_ih;		/* interrupt handler cookie */
	bus_space_handle_t	sc_sh;		/* memory handle */
	bus_space_tag_t		sc_st;		/* memory tag */
	struct resource		*sc_sr;		/* memory resource */
	bus_dma_tag_t		sc_srcdmat;	/* source dma tag */
	bus_dma_tag_t		sc_dstdmat;	/* destination dma tag */
	u_int			sc_chiprev;	/* major/minor chip revision */
	int			sc_flags;	/* device specific flags */
#define	SAFE_FLAGS_KEY		0x01		/* has key accelerator */
#define	SAFE_FLAGS_RNG		0x02		/* hardware rng */
	int			sc_suspended;
	int			sc_needwakeup;	/* notify crypto layer */
	int32_t			sc_cid;		/* crypto tag */
	struct safe_dma_alloc	sc_ringalloc;	/* PE ring allocation state */
	struct safe_ringentry	*sc_ring;	/* PE ring */
	struct safe_ringentry	*sc_ringtop;	/* PE ring top */
	struct safe_ringentry	*sc_front;	/* next free entry */
	struct safe_ringentry	*sc_back;	/* next pending entry */
	int			sc_nqchip;	/* # passed to chip */
	struct mtx		sc_ringmtx;	/* PE ring lock */
	struct safe_pdesc	*sc_spring;	/* src particle ring */
	struct safe_pdesc	*sc_springtop;	/* src particle ring top */
	struct safe_pdesc	*sc_spfree;	/* next free src particle */
	struct safe_dma_alloc	sc_spalloc;	/* src particle ring state */
	struct safe_pdesc	*sc_dpring;	/* dest particle ring */
	struct safe_pdesc	*sc_dpringtop;	/* dest particle ring top */
	struct safe_pdesc	*sc_dpfree;	/* next free dest particle */
	struct safe_dma_alloc	sc_dpalloc;	/* dst particle ring state */
	int			sc_nsessions;	/* # of sessions */
	struct safe_session	*sc_sessions;	/* sessions */

	struct callout		sc_rngto;	/* rng timeout */
	struct rndtest_state	*sc_rndtest;	/* RNG test state */
	void			(*sc_harvest)(struct rndtest_state *,
					void *, u_int);
};
#endif /* _KERNEL */

struct safe_stats {
	u_int64_t	st_ibytes;
	u_int64_t	st_obytes;
	u_int32_t	st_ipackets;
	u_int32_t	st_opackets;
	u_int32_t	st_invalid;		/* invalid argument */
	u_int32_t	st_badsession;		/* invalid session id */
	u_int32_t	st_badflags;		/* flags indicate !(mbuf | uio) */
	u_int32_t	st_nodesc;		/* op submitted w/o descriptors */
	u_int32_t	st_badalg;		/* unsupported algorithm */
	u_int32_t	st_ringfull;		/* PE descriptor ring full */
	u_int32_t	st_peoperr;		/* PE marked error */
	u_int32_t	st_dmaerr;		/* PE DMA error */
	u_int32_t	st_bypasstoobig;	/* bypass > 96 bytes */
	u_int32_t	st_skipmismatch;	/* enc part begins before auth part */
	u_int32_t	st_lenmismatch;		/* enc length differs from auth length */
	u_int32_t	st_coffmisaligned;	/* crypto offset not 32-bit aligned */
	u_int32_t	st_cofftoobig;		/* crypto offset > 255 words */
	u_int32_t	st_iovmisaligned;	/* iov op not aligned */
	u_int32_t	st_iovnotuniform;	/* iov op not suitable */
	u_int32_t	st_unaligned;		/* unaligned src caused copy */
	u_int32_t	st_notuniform;		/* non-uniform src caused copy */
	u_int32_t	st_nomap;		/* bus_dmamap_create failed */
	u_int32_t	st_noload;		/* bus_dmamap_load_* failed */
	u_int32_t	st_nombuf;		/* MGET* failed */
	u_int32_t	st_nomcl;		/* MCLGET* failed */
	u_int32_t	st_maxqchip;		/* max mcr1 ops out for processing */
	u_int32_t	st_rng;			/* RNG requests */
	u_int32_t	st_rngalarm;		/* RNG alarm requests */
	u_int32_t	st_noicvcopy;		/* ICV data copies suppressed */
};
#endif /* _SAFE_SAFEVAR_H_ */