/* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
/*	$NetBSD: qatvar.h,v 1.2 2020/03/14 18:08:39 ad Exp $	*/

/*
 * Copyright (c) 2019 Internet Initiative Japan, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 *   Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* $FreeBSD$ */

#ifndef _DEV_PCI_QATVAR_H_
#define _DEV_PCI_QATVAR_H_

#include <sys/counter.h>
#include <sys/malloc.h>

#include <opencrypto/cryptodev.h>

#define QAT_NSYMREQ	256
#define QAT_NSYMCOOKIE	((QAT_NSYMREQ * 2 + 1) * 2)
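/*
 * Illustrative sizing note: with QAT_NSYMREQ == 256 the expression above
 * works out to (256 * 2 + 1) * 2 == 1026 symmetric-crypto cookies per
 * crypto bank (see struct qat_crypto_bank below).
 */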

#define QAT_EV_NAME_SIZE		32
#define QAT_RING_NAME_SIZE		32

#define QAT_MAXSEG			HW_MAXSEG /* max segments for sg dma */
#define QAT_MAXLEN			65535	/* IP_MAXPACKET */

#define QAT_HB_INTERVAL			500	/* heartbeat msec */
#define QAT_SSM_WDT			100

enum qat_chip_type {
	QAT_CHIP_C2XXX = 0,	/* NanoQAT: Atom C2000 */
	QAT_CHIP_C2XXX_IOV,
	QAT_CHIP_C3XXX,		/* Atom C3000 */
	QAT_CHIP_C3XXX_IOV,
	QAT_CHIP_C62X,
	QAT_CHIP_C62X_IOV,
	QAT_CHIP_D15XX,
	QAT_CHIP_D15XX_IOV,
	QAT_CHIP_DH895XCC,
	QAT_CHIP_DH895XCC_IOV,
};

enum qat_sku {
	QAT_SKU_UNKNOWN = 0,
	QAT_SKU_1,
	QAT_SKU_2,
	QAT_SKU_3,
	QAT_SKU_4,
	QAT_SKU_VF,
};

enum qat_ae_status {
	QAT_AE_ENABLED = 1,
	QAT_AE_ACTIVE,
	QAT_AE_DISABLED
};

#define TIMEOUT_AE_RESET	100
#define TIMEOUT_AE_CHECK	10000
#define TIMEOUT_AE_CSR		500
#define AE_EXEC_CYCLE		20

#define QAT_UOF_MAX_PAGE		1
#define QAT_UOF_MAX_PAGE_REGION		1

struct qat_dmamem {
	bus_dma_tag_t qdm_dma_tag;
	bus_dmamap_t qdm_dma_map;
	bus_size_t qdm_dma_size;
	bus_dma_segment_t qdm_dma_seg;
	void *qdm_dma_vaddr;
};

/* Valid internal ring size values */
#define QAT_RING_SIZE_128 0x01
#define QAT_RING_SIZE_256 0x02
#define QAT_RING_SIZE_512 0x03
#define QAT_RING_SIZE_4K 0x06
#define QAT_RING_SIZE_16K 0x08
#define QAT_RING_SIZE_4M 0x10
#define QAT_MIN_RING_SIZE QAT_RING_SIZE_128
#define QAT_MAX_RING_SIZE QAT_RING_SIZE_4M
#define QAT_DEFAULT_RING_SIZE QAT_RING_SIZE_16K

/* Valid internal msg size values */
#define QAT_MSG_SIZE_32 0x01
#define QAT_MSG_SIZE_64 0x02
#define QAT_MSG_SIZE_128 0x04
#define QAT_MIN_MSG_SIZE QAT_MSG_SIZE_32
#define QAT_MAX_MSG_SIZE QAT_MSG_SIZE_128

/* Size to bytes conversion macros for ring and msg size values */
#define QAT_MSG_SIZE_TO_BYTES(SIZE) (SIZE << 5)
#define QAT_BYTES_TO_MSG_SIZE(SIZE) (SIZE >> 5)
#define QAT_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << (SIZE - 1)) << 7)
#define QAT_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << (SIZE - 1)) >> 7)

/* Minimum ring buffer size for memory allocation */
#define QAT_RING_SIZE_BYTES_MIN(SIZE) \
	((SIZE < QAT_SIZE_TO_RING_SIZE_IN_BYTES(QAT_RING_SIZE_4K)) ? \
		QAT_SIZE_TO_RING_SIZE_IN_BYTES(QAT_RING_SIZE_4K) : SIZE)
#define QAT_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
#define QAT_SIZE_TO_POW(SIZE) ((((SIZE & 0x4) >> 1) | ((SIZE & 0x4) >> 2) | \
				SIZE) & ~0x4)
/* Max outstanding requests */
#define QAT_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \
	((((1 << (RING_SIZE - 1)) << 3) >> QAT_SIZE_TO_POW(MSG_SIZE)) - 1)
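/*
 * Worked examples of the encodings above (illustrative only; the numbers
 * follow directly from the macros):
 *   QAT_MSG_SIZE_TO_BYTES(QAT_MSG_SIZE_64)            = 0x02 << 5     = 64
 *   QAT_SIZE_TO_RING_SIZE_IN_BYTES(QAT_RING_SIZE_16K) = (1 << 7) << 7 = 16384
 *   QAT_MAX_INFLIGHTS(QAT_RING_SIZE_16K, QAT_MSG_SIZE_64)
 *                                                     = 16384 / 64 - 1 = 255
 * i.e. the ring capacity in messages minus one, presumably so that a full
 * ring can be told apart from an empty one.
 */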

#define QAT_RING_PATTERN		0x7f

struct qat_softc;

typedef int (*qat_cb_t)(struct qat_softc *, void *, void *);

struct qat_ring {
	struct mtx qr_ring_mtx;   /* Lock per ring */
	bool qr_need_wakeup;
	void *qr_ring_vaddr;
	uint32_t * volatile qr_inflight;	/* tx/rx shared */
	uint32_t qr_head;
	uint32_t qr_tail;
	uint8_t qr_msg_size;
	uint8_t qr_ring_size;
	uint32_t qr_ring;	/* ring number in bank */
	uint32_t qr_bank;	/* bank number in device */
	uint32_t qr_ring_id;
	uint32_t qr_ring_mask;
	qat_cb_t qr_cb;
	void *qr_cb_arg;
	struct qat_dmamem qr_dma;
	bus_addr_t qr_ring_paddr;

	const char *qr_name;
};

struct qat_bank {
	struct qat_softc *qb_sc;	/* back pointer to softc */
	uint32_t qb_intr_mask;		/* current interrupt mask */
	uint32_t qb_allocated_rings;	/* current allocated ring bitfield */
	uint32_t qb_coalescing_time;	/* timer in nano sec, 0: disabled */
#define COALESCING_TIME_INTERVAL_DEFAULT	10000
#define COALESCING_TIME_INTERVAL_MIN		500
#define COALESCING_TIME_INTERVAL_MAX		0xfffff
	uint32_t qb_bank;		/* bank index */
	struct mtx qb_bank_mtx;
	struct resource *qb_ih;
	void *qb_ih_cookie;

	struct qat_ring qb_et_rings[MAX_RING_PER_BANK];

};

struct qat_ap_bank {
	uint32_t qab_nf_mask;
	uint32_t qab_nf_dest;
	uint32_t qab_ne_mask;
	uint32_t qab_ne_dest;
};

struct qat_ae_page {
	struct qat_ae_page *qap_next;
	struct qat_uof_page *qap_page;
	struct qat_ae_region *qap_region;
	u_int qap_flags;
};

#define QAT_AE_PAGA_FLAG_WAITING	(1 << 0)

struct qat_ae_region {
	struct qat_ae_page *qar_loaded_page;
	STAILQ_HEAD(, qat_ae_page) qar_waiting_pages;
};

struct qat_ae_slice {
	u_int qas_assigned_ctx_mask;
	struct qat_ae_region qas_regions[QAT_UOF_MAX_PAGE_REGION];
	struct qat_ae_page qas_pages[QAT_UOF_MAX_PAGE];
	struct qat_ae_page *qas_cur_pages[MAX_AE_CTX];
	struct qat_uof_image *qas_image;
};

#define QAT_AE(sc, ae)			\
		((sc)->sc_ae[ae])

struct qat_ae {
	u_int qae_state;		/* AE state */
	u_int qae_ustore_size;		/* micro-store size */
	u_int qae_free_addr;		/* free micro-store address */
	u_int qae_free_size;		/* free micro-store size */
	u_int qae_live_ctx_mask;	/* live context mask */
	u_int qae_ustore_dram_addr;	/* micro-store DRAM address */
	u_int qae_reload_size;		/* reloadable code size */

	/* aefw */
	u_int qae_num_slices;
	struct qat_ae_slice qae_slices[MAX_AE_CTX];
	u_int qae_reloc_ustore_dram;	/* reloadable ustore-dram address */
	u_int qae_effect_ustore_size;	/* effective AE ustore size */
	u_int qae_shareable_ustore;
};

struct qat_mof {
	void *qmf_sym;			/* SYM_OBJS in sc_fw_mof */
	size_t qmf_sym_size;
	void *qmf_uof_objs;		/* UOF_OBJS in sc_fw_mof */
	size_t qmf_uof_objs_size;
	void *qmf_suof_objs;		/* SUOF_OBJS in sc_fw_mof */
	size_t qmf_suof_objs_size;
};

struct qat_ae_batch_init {
	u_int qabi_ae;
	u_int qabi_addr;
	u_int *qabi_value;
	u_int qabi_size;
	STAILQ_ENTRY(qat_ae_batch_init) qabi_next;
};

STAILQ_HEAD(qat_ae_batch_init_list, qat_ae_batch_init);

/* overwritten struct uof_uword_block */
struct qat_uof_uword_block {
	u_int quub_start_addr;		/* start address */
	u_int quub_num_words;		/* number of microwords */
	uint64_t quub_micro_words;	/* pointer to the uwords */
};

struct qat_uof_page {
	u_int qup_page_num;		/* page number */
	u_int qup_def_page;		/* default page */
	u_int qup_page_region;		/* region of page */
	u_int qup_beg_vaddr;		/* begin virtual address */
	u_int qup_beg_paddr;		/* begin physical address */

	u_int qup_num_uc_var;		/* num of uC var in array */
	struct uof_uword_fixup *qup_uc_var;
					/* array of uC variables */
	u_int qup_num_imp_var;		/* num of import var in array */
	struct uof_import_var *qup_imp_var;
					/* array of import variables */
	u_int qup_num_imp_expr;		/* num of import expr in array */
	struct uof_uword_fixup *qup_imp_expr;
					/* array of import expressions */
	u_int qup_num_neigh_reg;	/* num of neigh-reg in array */
	struct uof_uword_fixup *qup_neigh_reg;
					/* array of neigh-reg assignments */
	u_int qup_num_micro_words;	/* number of microwords in the seg */

	u_int qup_num_uw_blocks;	/* number of uword blocks */
	struct qat_uof_uword_block *qup_uw_blocks;
					/* array of uword blocks */
};

struct qat_uof_image {
	struct uof_image *qui_image;		/* image pointer */
	struct qat_uof_page qui_pages[QAT_UOF_MAX_PAGE];
						/* array of pages */

	u_int qui_num_ae_reg;			/* num of registers */
	struct uof_ae_reg *qui_ae_reg;		/* array of registers */

	u_int qui_num_init_reg_sym;		/* num of reg/sym init values */
	struct uof_init_reg_sym *qui_init_reg_sym;
					/* array of reg/sym init values */

	u_int qui_num_sbreak;			/* num of sbreak values */
	struct qui_sbreak *qui_sbreak;		/* array of sbreak values */

	u_int qui_num_uwords_used;
				/* highest uword address referenced + 1 */
};

struct qat_aefw_uof {
	size_t qafu_size;			/* uof size */
	struct uof_obj_hdr *qafu_obj_hdr;	/* UOF_OBJS */

	void *qafu_str_tab;
	size_t qafu_str_tab_size;

	u_int qafu_num_init_mem;
	struct uof_init_mem *qafu_init_mem;
	size_t qafu_init_mem_size;

	struct uof_var_mem_seg *qafu_var_mem_seg;

	struct qat_ae_batch_init_list qafu_lm_init[MAX_AE];
	size_t qafu_num_lm_init[MAX_AE];
	size_t qafu_num_lm_init_inst[MAX_AE];

	u_int qafu_num_imgs;			/* number of uof images */
	struct qat_uof_image qafu_imgs[MAX_NUM_AE * MAX_AE_CTX];
						/* uof images */
};

#define QAT_SERVICE_CRYPTO_A		(1 << 0)
#define QAT_SERVICE_CRYPTO_B		(1 << 1)

struct qat_admin_rings {
	uint32_t qadr_active_aes_per_accel;
	uint8_t qadr_srv_mask[MAX_AE_PER_ACCEL];

	struct qat_dmamem qadr_dma;
	struct fw_init_ring_table *qadr_master_ring_tbl;
	struct fw_init_ring_table *qadr_cya_ring_tbl;
	struct fw_init_ring_table *qadr_cyb_ring_tbl;

	struct qat_ring *qadr_admin_tx;
	struct qat_ring *qadr_admin_rx;
};

struct qat_accel_init_cb {
	int qaic_status;
};

struct qat_admin_comms {
	struct qat_dmamem qadc_dma;
	struct qat_dmamem qadc_const_tbl_dma;
	struct qat_dmamem qadc_hb_dma;
};

#define QAT_PID_MINOR_REV 0xf
#define QAT_PID_MAJOR_REV (0xf << 4)

struct qat_suof_image {
	char *qsi_simg_buf;
	u_long qsi_simg_len;
	char *qsi_css_header;
	char *qsi_css_key;
	char *qsi_css_signature;
	char *qsi_css_simg;
	u_long qsi_simg_size;
	u_int qsi_ae_num;
	u_int qsi_ae_mask;
	u_int qsi_fw_type;
	u_long qsi_simg_name;
	u_long qsi_appmeta_data;
	struct qat_dmamem qsi_dma;
};

struct qat_aefw_suof {
	u_int qafs_file_id;
	u_int qafs_check_sum;
	char qafs_min_ver;
	char qafs_maj_ver;
	char qafs_fw_type;
	char *qafs_suof_buf;
	u_int qafs_suof_size;
	char *qafs_sym_str;
	u_int qafs_sym_size;
	u_int qafs_num_simgs;
	struct qat_suof_image *qafs_simg;
};

enum qat_sym_hash_algorithm {
	QAT_SYM_HASH_NONE = 0,
	QAT_SYM_HASH_MD5 = 1,
	QAT_SYM_HASH_SHA1 = 2,
	QAT_SYM_HASH_SHA224 = 3,
	QAT_SYM_HASH_SHA256 = 4,
	QAT_SYM_HASH_SHA384 = 5,
	QAT_SYM_HASH_SHA512 = 6,
	QAT_SYM_HASH_AES_XCBC = 7,
	QAT_SYM_HASH_AES_CCM = 8,
	QAT_SYM_HASH_AES_GCM = 9,
	QAT_SYM_HASH_KASUMI_F9 = 10,
	QAT_SYM_HASH_SNOW3G_UIA2 = 11,
	QAT_SYM_HASH_AES_CMAC = 12,
	QAT_SYM_HASH_AES_GMAC = 13,
	QAT_SYM_HASH_AES_CBC_MAC = 14,
};

#define QAT_HASH_MD5_BLOCK_SIZE			64
#define QAT_HASH_MD5_DIGEST_SIZE		16
#define QAT_HASH_MD5_STATE_SIZE			16
#define QAT_HASH_SHA1_BLOCK_SIZE		64
#define QAT_HASH_SHA1_DIGEST_SIZE		20
#define QAT_HASH_SHA1_STATE_SIZE		20
#define QAT_HASH_SHA224_BLOCK_SIZE		64
#define QAT_HASH_SHA224_DIGEST_SIZE		28
#define QAT_HASH_SHA224_STATE_SIZE		32
#define QAT_HASH_SHA256_BLOCK_SIZE		64
#define QAT_HASH_SHA256_DIGEST_SIZE		32
#define QAT_HASH_SHA256_STATE_SIZE		32
#define QAT_HASH_SHA384_BLOCK_SIZE		128
#define QAT_HASH_SHA384_DIGEST_SIZE		48
#define QAT_HASH_SHA384_STATE_SIZE		64
#define QAT_HASH_SHA512_BLOCK_SIZE		128
#define QAT_HASH_SHA512_DIGEST_SIZE		64
#define QAT_HASH_SHA512_STATE_SIZE		64
#define QAT_HASH_XCBC_PRECOMP_KEY_NUM		3
#define QAT_HASH_XCBC_MAC_BLOCK_SIZE		16
#define QAT_HASH_XCBC_MAC_128_DIGEST_SIZE	16
#define QAT_HASH_CMAC_BLOCK_SIZE		16
#define QAT_HASH_CMAC_128_DIGEST_SIZE		16
#define QAT_HASH_AES_CCM_BLOCK_SIZE		16
#define QAT_HASH_AES_CCM_DIGEST_SIZE		16
#define QAT_HASH_AES_GCM_BLOCK_SIZE		16
#define QAT_HASH_AES_GCM_DIGEST_SIZE		16
#define QAT_HASH_AES_GCM_STATE_SIZE		16
#define QAT_HASH_KASUMI_F9_BLOCK_SIZE		8
#define QAT_HASH_KASUMI_F9_DIGEST_SIZE		4
#define QAT_HASH_SNOW3G_UIA2_BLOCK_SIZE		8
#define QAT_HASH_SNOW3G_UIA2_DIGEST_SIZE	4
#define QAT_HASH_AES_CBC_MAC_BLOCK_SIZE		16
#define QAT_HASH_AES_CBC_MAC_DIGEST_SIZE	16
#define QAT_HASH_AES_GCM_ICV_SIZE_8		8
#define QAT_HASH_AES_GCM_ICV_SIZE_12		12
#define QAT_HASH_AES_GCM_ICV_SIZE_16		16
#define QAT_HASH_AES_CCM_ICV_SIZE_MIN		4
#define QAT_HASH_AES_CCM_ICV_SIZE_MAX		16
#define QAT_HASH_IPAD_BYTE			0x36
#define QAT_HASH_OPAD_BYTE			0x5c
#define QAT_HASH_IPAD_4_BYTES			0x36363636
#define QAT_HASH_OPAD_4_BYTES			0x5c5c5c5c
#define QAT_HASH_KASUMI_F9_KEY_MODIFIER_4_BYTES	0xAAAAAAAA
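/*
 * 0x36 and 0x5c are the standard HMAC ipad/opad bytes (RFC 2104).  As a
 * rough sketch of how the precompute helpers declared at the end of this
 * header can use them, the inner/outer digest states are derived from the
 * key, zero-padded to the hash block length and XORed with the repeated
 * pad byte:
 *
 *	for (i = 0; i < block_len; i++)
 *		ipad_block[i] = padded_key[i] ^ QAT_HASH_IPAD_BYTE;
 *
 * (block_len, ipad_block and padded_key are illustrative names, not
 * symbols defined by this driver.)
 */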

#define QAT_SYM_XCBC_STATE_SIZE		((QAT_HASH_XCBC_MAC_BLOCK_SIZE) * 3)
#define QAT_SYM_CMAC_STATE_SIZE		((QAT_HASH_CMAC_BLOCK_SIZE) * 3)

struct qat_sym_hash_alg_info {
	uint32_t qshai_digest_len;		/* Digest length in bytes */
	uint32_t qshai_block_len;		/* Block length in bytes */
	uint32_t qshai_state_size;		/* size of above state in bytes */
	const uint8_t *qshai_init_state;	/* Initial state */

	const struct auth_hash *qshai_sah;	/* software auth hash */
	uint32_t qshai_state_offset;		/* offset to state in *_CTX */
	uint32_t qshai_state_word;
};

struct qat_sym_hash_qat_info {
	uint32_t qshqi_algo_enc;	/* QAT Algorithm encoding */
	uint32_t qshqi_auth_counter;	/* Counter value for Auth */
	uint32_t qshqi_state1_len;	/* QAT state1 length in bytes */
	uint32_t qshqi_state2_len;	/* QAT state2 length in bytes */
};

struct qat_sym_hash_def {
	const struct qat_sym_hash_alg_info *qshd_alg;
	const struct qat_sym_hash_qat_info *qshd_qat;
};

#define QAT_SYM_REQ_PARAMS_SIZE_MAX			(24 + 32)
/* Reserve enough space for cipher and authentication request params */
/* The basis of these values is guaranteed in qat_hw*var.h with CTASSERT */

#define QAT_SYM_REQ_PARAMS_SIZE_PADDED			\
		roundup(QAT_SYM_REQ_PARAMS_SIZE_MAX, QAT_OPTIMAL_ALIGN)
/* Pad out to 64-byte multiple to ensure optimal alignment of next field */
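/*
 * For example, with QAT_OPTIMAL_ALIGN == 64 (the 64-byte multiple mentioned
 * above), roundup(24 + 32, 64) == 64 bytes of padded request parameters.
 */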

#define QAT_SYM_KEY_TLS_PREFIX_SIZE			(128)
/* Hash Prefix size in bytes for TLS (128 = MAX = SHA2 (384, 512)) */

#define QAT_SYM_KEY_MAX_HASH_STATE_BUFFER		\
		(QAT_SYM_KEY_TLS_PREFIX_SIZE * 2)
/* hash state prefix buffer structure that holds the maximum sized secret */

#define QAT_SYM_HASH_BUFFER_LEN			QAT_HASH_SHA512_STATE_SIZE
/* Buffer length to hold 16 byte MD5 key and 20 byte SHA1 key */

#define QAT_GCM_AAD_SIZE_MAX		240
/* Maximum AAD size */

#define	QAT_AES_GCM_AAD_ALIGN		16

struct qat_sym_bulk_cookie {
	uint8_t qsbc_req_params_buf[QAT_SYM_REQ_PARAMS_SIZE_PADDED];
	/* memory block reserved for request params, QAT 1.5 only
	 * NOTE: Field must be correctly aligned in memory for access by QAT
	 * engine */
	struct qat_crypto *qsbc_crypto;
	struct qat_session *qsbc_session;
	/* Session context */
	void *qsbc_cb_tag;
	/* correlator supplied by the client */
	uint8_t qsbc_msg[QAT_MSG_SIZE_TO_BYTES(QAT_MAX_MSG_SIZE)];
	/* QAT request message */
} __aligned(QAT_OPTIMAL_ALIGN);

/* The basis of these values is guaranteed in qat_hw*var.h with CTASSERT */
#define HASH_CONTENT_DESC_SIZE		176
#define CIPHER_CONTENT_DESC_SIZE	64

#define CONTENT_DESC_MAX_SIZE	roundup(				\
		HASH_CONTENT_DESC_SIZE + CIPHER_CONTENT_DESC_SIZE,	\
		QAT_OPTIMAL_ALIGN)
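/*
 * For example, assuming QAT_OPTIMAL_ALIGN is 64 (as the padding comments
 * above suggest), roundup(176 + 64, 64) == 256 bytes per content descriptor.
 */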

enum qat_sym_dma {
	QAT_SYM_DMA_AADBUF = 0,
	QAT_SYM_DMA_BUF,
	QAT_SYM_DMA_OBUF,
	QAT_SYM_DMA_COUNT,
};

struct qat_sym_dmamap {
	bus_dmamap_t qsd_dmamap;
	bus_dma_tag_t qsd_dma_tag;
};

struct qat_sym_cookie {
	struct qat_sym_bulk_cookie qsc_bulk_cookie;

	/* should be 64-byte aligned */
	struct buffer_list_desc qsc_buf_list;
	struct buffer_list_desc qsc_obuf_list;

	bus_dmamap_t qsc_self_dmamap;
	bus_dma_tag_t qsc_self_dma_tag;

	uint8_t qsc_iv_buf[EALG_MAX_BLOCK_LEN];
	uint8_t qsc_auth_res[QAT_SYM_HASH_BUFFER_LEN];
	uint8_t qsc_gcm_aad[QAT_GCM_AAD_SIZE_MAX];
	uint8_t qsc_content_desc[CONTENT_DESC_MAX_SIZE];

	struct qat_sym_dmamap qsc_dma[QAT_SYM_DMA_COUNT];

	bus_addr_t qsc_bulk_req_params_buf_paddr;
	bus_addr_t qsc_buffer_list_desc_paddr;
	bus_addr_t qsc_obuffer_list_desc_paddr;
	bus_addr_t qsc_iv_buf_paddr;
	bus_addr_t qsc_auth_res_paddr;
	bus_addr_t qsc_gcm_aad_paddr;
	bus_addr_t qsc_content_desc_paddr;
};

CTASSERT(offsetof(struct qat_sym_cookie,
    qsc_bulk_cookie.qsbc_req_params_buf) % QAT_OPTIMAL_ALIGN == 0);
CTASSERT(offsetof(struct qat_sym_cookie, qsc_buf_list) % QAT_OPTIMAL_ALIGN == 0);

#define MAX_CIPHER_SETUP_BLK_SZ						\
		(sizeof(struct hw_cipher_config) +			\
		2 * HW_KASUMI_KEY_SZ + 2 * HW_KASUMI_BLK_SZ)
#define MAX_HASH_SETUP_BLK_SZ	sizeof(union hw_auth_algo_blk)

struct qat_crypto_desc {
	uint8_t qcd_content_desc[CONTENT_DESC_MAX_SIZE]; /* must be first */
	/* used only for QAT 1.5 */
	uint8_t qcd_hash_state_prefix_buf[QAT_GCM_AAD_SIZE_MAX];

	bus_addr_t qcd_desc_paddr;
	bus_addr_t qcd_hash_state_paddr;

	enum fw_slice qcd_slices[MAX_FW_SLICE + 1];
	enum fw_la_cmd_id qcd_cmd_id;
	enum hw_cipher_dir qcd_cipher_dir;

	/* content desc info */
	uint8_t qcd_hdr_sz;		/* in quad words */
	uint8_t qcd_hw_blk_sz;		/* in quad words */
	uint32_t qcd_cipher_offset;
	uint32_t qcd_auth_offset;
	/* hash info */
	uint8_t qcd_state_storage_sz;	/* in quad words */
	uint32_t qcd_gcm_aad_sz_offset1;
	uint32_t qcd_gcm_aad_sz_offset2;
	/* cipher info */
	uint16_t qcd_cipher_blk_sz;	/* in bytes */
	uint16_t qcd_auth_sz;		/* in bytes */

	uint8_t qcd_req_cache[QAT_MSG_SIZE_TO_BYTES(QAT_MAX_MSG_SIZE)];
} __aligned(QAT_OPTIMAL_ALIGN);

struct qat_session {
	struct qat_crypto_desc *qs_dec_desc;	/* should be at top of struct */
	/* decrypt or auth then decrypt or auth */

	struct qat_crypto_desc *qs_enc_desc;
	/* encrypt or encrypt then auth */

	struct qat_dmamem qs_desc_mem;

	enum hw_cipher_algo qs_cipher_algo;
	enum hw_cipher_mode qs_cipher_mode;
	enum hw_auth_algo qs_auth_algo;
	enum hw_auth_mode qs_auth_mode;

	const uint8_t *qs_cipher_key;
	int qs_cipher_klen;
	const uint8_t *qs_auth_key;
	int qs_auth_klen;
	int qs_auth_mlen;

	uint32_t qs_status;
#define QAT_SESSION_STATUS_ACTIVE	(1 << 0)
#define QAT_SESSION_STATUS_FREEING	(1 << 1)
	uint32_t qs_inflight;
	int qs_aad_length;
	bool qs_need_wakeup;

	struct mtx qs_session_mtx;
};

struct qat_crypto_bank {
	uint16_t qcb_bank;

	struct qat_ring *qcb_sym_tx;
	struct qat_ring *qcb_sym_rx;

	struct qat_dmamem qcb_symck_dmamems[QAT_NSYMCOOKIE];
	struct qat_sym_cookie *qcb_symck_free[QAT_NSYMCOOKIE];
	uint32_t qcb_symck_free_count;

	struct mtx qcb_bank_mtx;

	char qcb_ring_names[2][QAT_RING_NAME_SIZE];	/* sym tx,rx */
};

struct qat_crypto {
	struct qat_softc *qcy_sc;
	uint32_t qcy_bank_mask;
	uint16_t qcy_num_banks;

	int32_t qcy_cid;		/* OpenCrypto driver ID */

	struct qat_crypto_bank *qcy_banks; /* array of qat_crypto_bank */

	uint32_t qcy_session_free_count;

	struct mtx qcy_crypto_mtx;
};

struct qat_hw {
	int8_t qhw_sram_bar_id;
	int8_t qhw_misc_bar_id;
	int8_t qhw_etr_bar_id;

	bus_size_t qhw_cap_global_offset;
	bus_size_t qhw_ae_offset;
	bus_size_t qhw_ae_local_offset;
	bus_size_t qhw_etr_bundle_size;

	/* crypto processing callbacks */
	size_t qhw_crypto_opaque_offset;
	void (*qhw_crypto_setup_req_params)(struct qat_crypto_bank *,
	    struct qat_session *, struct qat_crypto_desc const *,
	    struct qat_sym_cookie *, struct cryptop *);
	void (*qhw_crypto_setup_desc)(struct qat_crypto *, struct qat_session *,
	    struct qat_crypto_desc *);

	uint8_t qhw_num_banks;			/* max number of banks */
	uint8_t qhw_num_ap_banks;		/* max number of AutoPush banks */
	uint8_t qhw_num_rings_per_bank;		/* rings per bank */
	uint8_t qhw_num_accel;			/* max number of accelerators */
	uint8_t qhw_num_engines;		/* max number of accelerator engines */
	uint8_t qhw_tx_rx_gap;
	uint32_t qhw_tx_rings_mask;
	uint32_t qhw_clock_per_sec;
	bool qhw_fw_auth;
	uint32_t qhw_fw_req_size;
	uint32_t qhw_fw_resp_size;

	uint8_t qhw_ring_sym_tx;
	uint8_t qhw_ring_sym_rx;
	uint8_t qhw_ring_asym_tx;
	uint8_t qhw_ring_asym_rx;

	/* MSIx */
	uint32_t qhw_msix_ae_vec_gap;	/* gap to ae vec from bank */

	const char *qhw_mof_fwname;
	const char *qhw_mmp_fwname;

	uint32_t qhw_prod_type;		/* cpu type */

	/* setup callbacks */
	uint32_t (*qhw_get_accel_mask)(struct qat_softc *);
	uint32_t (*qhw_get_ae_mask)(struct qat_softc *);
	enum qat_sku (*qhw_get_sku)(struct qat_softc *);
	uint32_t (*qhw_get_accel_cap)(struct qat_softc *);
	const char *(*qhw_get_fw_uof_name)(struct qat_softc *);
	void (*qhw_enable_intr)(struct qat_softc *);
	void (*qhw_init_etr_intr)(struct qat_softc *, int);
	int (*qhw_init_admin_comms)(struct qat_softc *);
	int (*qhw_send_admin_init)(struct qat_softc *);
	int (*qhw_init_arb)(struct qat_softc *);
	void (*qhw_get_arb_mapping)(struct qat_softc *, const uint32_t **);
	void (*qhw_enable_error_correction)(struct qat_softc *);
	int (*qhw_check_uncorrectable_error)(struct qat_softc *);
	void (*qhw_print_err_registers)(struct qat_softc *);
	void (*qhw_disable_error_interrupts)(struct qat_softc *);
	int (*qhw_check_slice_hang)(struct qat_softc *);
	int (*qhw_set_ssm_wdtimer)(struct qat_softc *);
};


/* sc_flags */
#define QAT_FLAG_ESRAM_ENABLE_AUTO_INIT	(1 << 0)
#define QAT_FLAG_SHRAM_WAIT_READY	(1 << 1)

/* sc_accel_cap */
#define QAT_ACCEL_CAP_CRYPTO_SYMMETRIC	(1 << 0)
#define QAT_ACCEL_CAP_CRYPTO_ASYMMETRIC	(1 << 1)
#define QAT_ACCEL_CAP_CIPHER		(1 << 2)
#define QAT_ACCEL_CAP_AUTHENTICATION	(1 << 3)
#define QAT_ACCEL_CAP_REGEX		(1 << 4)
#define QAT_ACCEL_CAP_COMPRESSION	(1 << 5)
#define QAT_ACCEL_CAP_LZS_COMPRESSION	(1 << 6)
#define QAT_ACCEL_CAP_RANDOM_NUMBER	(1 << 7)
#define QAT_ACCEL_CAP_ZUC		(1 << 8)
#define QAT_ACCEL_CAP_SHA3		(1 << 9)
#define QAT_ACCEL_CAP_KPT		(1 << 10)

#define QAT_ACCEL_CAP_BITS	\
	"\177\020"	\
	"b\x0a"		"KPT\0" \
	"b\x09"		"SHA3\0" \
	"b\x08"		"ZUC\0" \
	"b\x07"		"RANDOM_NUMBER\0" \
	"b\x06"		"LZS_COMPRESSION\0" \
	"b\x05"		"COMPRESSION\0" \
	"b\x04"		"REGEX\0" \
	"b\x03"		"AUTHENTICATION\0" \
	"b\x02"		"CIPHER\0" \
	"b\x01"		"CRYPTO_ASYMMETRIC\0" \
	"b\x00"		"CRYPTO_SYMMETRIC\0"
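/*
 * The string above appears to be a NetBSD-style snprintb(3) bit-description
 * string retained from the NetBSD origin of this driver: "\177\020" selects
 * the "new" format with hexadecimal output, and each "b\xNN<NAME>\0" entry
 * labels single bit NN, so bit 0 decodes as CRYPTO_SYMMETRIC, bit 1 as
 * CRYPTO_ASYMMETRIC, and so on, mirroring the QAT_ACCEL_CAP_* flags above.
 */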

#define QAT_HI_PRIO_RING_WEIGHT		0xfc
#define QAT_LO_PRIO_RING_WEIGHT		0xfe
#define QAT_DEFAULT_RING_WEIGHT		0xff
#define QAT_DEFAULT_PVL			0

struct firmware;
struct resource;

struct qat_softc {
	device_t sc_dev;

	struct resource *sc_res[MAX_BARS];
	int sc_rid[MAX_BARS];
	bus_space_tag_t sc_csrt[MAX_BARS];
	bus_space_handle_t sc_csrh[MAX_BARS];

	uint32_t sc_ae_num;
	uint32_t sc_ae_mask;

	struct qat_crypto sc_crypto;		/* crypto services */

	struct qat_hw sc_hw;

	uint8_t sc_rev;
	enum qat_sku sc_sku;
	uint32_t sc_flags;

	uint32_t sc_accel_num;
	uint32_t sc_accel_mask;
	uint32_t sc_accel_cap;

	struct qat_admin_rings sc_admin_rings;	/* used only for qat 1.5 */
	struct qat_admin_comms sc_admin_comms;	/* used only for qat 1.7 */

	/* ETR */
	struct qat_bank *sc_etr_banks;		/* array of etr banks */
	struct qat_ap_bank *sc_etr_ap_banks;	/* array of etr auto push banks */

	/* AE */
	struct qat_ae sc_ae[MAX_NUM_AE];

	/* Interrupt */
	struct resource *sc_ih;			/* ae cluster ih */
	void *sc_ih_cookie;			/* ae cluster ih cookie */

	/* Counters */
	counter_u64_t sc_gcm_aad_restarts;
	counter_u64_t sc_gcm_aad_updates;
	counter_u64_t sc_ring_full_restarts;
	counter_u64_t sc_sym_alloc_failures;

	/* Firmware */
	void *sc_fw_mof;			/* mof data */
	size_t sc_fw_mof_size;			/* mof size */
	struct qat_mof sc_mof;			/* mof sections */

	const char *sc_fw_uof_name;		/* uof/suof name in mof */

	void *sc_fw_uof;			/* uof head */
	size_t sc_fw_uof_size;			/* uof size */
	struct qat_aefw_uof sc_aefw_uof;	/* UOF_OBJS in uof */

	void *sc_fw_suof;			/* suof head */
	size_t sc_fw_suof_size;			/* suof size */
	struct qat_aefw_suof sc_aefw_suof;	/* suof context */

	void *sc_fw_mmp;			/* mmp data */
	size_t sc_fw_mmp_size;			/* mmp size */
};

static inline void
qat_bar_write_4(struct qat_softc *sc, int baroff, bus_size_t offset,
    uint32_t value)
{

	MPASS(baroff >= 0 && baroff < MAX_BARS);

	bus_space_write_4(sc->sc_csrt[baroff],
	    sc->sc_csrh[baroff], offset, value);
}

static inline uint32_t
qat_bar_read_4(struct qat_softc *sc, int baroff, bus_size_t offset)
{

	MPASS(baroff >= 0 && baroff < MAX_BARS);

	return bus_space_read_4(sc->sc_csrt[baroff],
	    sc->sc_csrh[baroff], offset);
}

static inline void
qat_misc_write_4(struct qat_softc *sc, bus_size_t offset, uint32_t value)
{

	qat_bar_write_4(sc, sc->sc_hw.qhw_misc_bar_id, offset, value);
}

static inline uint32_t
qat_misc_read_4(struct qat_softc *sc, bus_size_t offset)
{

	return qat_bar_read_4(sc, sc->sc_hw.qhw_misc_bar_id, offset);
}

static inline void
qat_misc_read_write_or_4(struct qat_softc *sc, bus_size_t offset,
    uint32_t value)
{
	uint32_t reg;

	reg = qat_misc_read_4(sc, offset);
	reg |= value;
	qat_misc_write_4(sc, offset, reg);
}

static inline void
qat_misc_read_write_and_4(struct qat_softc *sc, bus_size_t offset,
    uint32_t mask)
{
	uint32_t reg;

	reg = qat_misc_read_4(sc, offset);
	reg &= mask;
	qat_misc_write_4(sc, offset, reg);
}

static inline void
qat_etr_write_4(struct qat_softc *sc, bus_size_t offset, uint32_t value)
{

	qat_bar_write_4(sc, sc->sc_hw.qhw_etr_bar_id, offset, value);
}

static inline uint32_t
qat_etr_read_4(struct qat_softc *sc, bus_size_t offset)
{

	return qat_bar_read_4(sc, sc->sc_hw.qhw_etr_bar_id, offset);
}

static inline void
qat_ae_local_write_4(struct qat_softc *sc, u_char ae, bus_size_t offset,
	uint32_t value)
{

	offset = __SHIFTIN(ae & sc->sc_ae_mask, AE_LOCAL_AE_MASK) |
	    (offset & AE_LOCAL_CSR_MASK);

	qat_misc_write_4(sc, sc->sc_hw.qhw_ae_local_offset + offset,
	    value);
}

static inline uint32_t
qat_ae_local_read_4(struct qat_softc *sc, u_char ae, bus_size_t offset)
{

	offset = __SHIFTIN(ae & sc->sc_ae_mask, AE_LOCAL_AE_MASK) |
	    (offset & AE_LOCAL_CSR_MASK);

	return qat_misc_read_4(sc, sc->sc_hw.qhw_ae_local_offset + offset);
}

static inline void
qat_ae_xfer_write_4(struct qat_softc *sc, u_char ae, bus_size_t offset,
	uint32_t value)
{
	offset = __SHIFTIN(ae & sc->sc_ae_mask, AE_XFER_AE_MASK) |
	    __SHIFTIN(offset, AE_XFER_CSR_MASK);

	qat_misc_write_4(sc, sc->sc_hw.qhw_ae_offset + offset, value);
}

static inline void
qat_cap_global_write_4(struct qat_softc *sc, bus_size_t offset, uint32_t value)
{

	qat_misc_write_4(sc, sc->sc_hw.qhw_cap_global_offset + offset, value);
}

static inline uint32_t
qat_cap_global_read_4(struct qat_softc *sc, bus_size_t offset)
{

	return qat_misc_read_4(sc, sc->sc_hw.qhw_cap_global_offset + offset);
}


static inline void
qat_etr_bank_write_4(struct qat_softc *sc, int bank,
	bus_size_t offset, uint32_t value)
{

	qat_etr_write_4(sc, sc->sc_hw.qhw_etr_bundle_size * bank + offset,
	    value);
}

static inline uint32_t
qat_etr_bank_read_4(struct qat_softc *sc, int bank,
	bus_size_t offset)
{

	return qat_etr_read_4(sc,
	    sc->sc_hw.qhw_etr_bundle_size * bank + offset);
}

static inline void
qat_etr_ap_bank_write_4(struct qat_softc *sc, int ap_bank,
	bus_size_t offset, uint32_t value)
{

	qat_etr_write_4(sc, ETR_AP_BANK_OFFSET * ap_bank + offset, value);
}

static inline uint32_t
qat_etr_ap_bank_read_4(struct qat_softc *sc, int ap_bank,
	bus_size_t offset)
{

	return qat_etr_read_4(sc, ETR_AP_BANK_OFFSET * ap_bank + offset);
}


static inline void
qat_etr_bank_ring_write_4(struct qat_softc *sc, int bank, int ring,
	bus_size_t offset, uint32_t value)
{

	qat_etr_bank_write_4(sc, bank, (ring << 2) + offset, value);
}

static inline uint32_t
qat_etr_bank_ring_read_4(struct qat_softc *sc, int bank, int ring,
	bus_size_t offset)
{

	return qat_etr_bank_read_4(sc, bank, (ring << 2) + offset);
}

static inline void
qat_etr_bank_ring_base_write_8(struct qat_softc *sc, int bank, int ring,
	uint64_t value)
{
	uint32_t lo, hi;

	lo = (uint32_t)(value & 0xffffffff);
	hi = (uint32_t)((value & 0xffffffff00000000ULL) >> 32);
	qat_etr_bank_ring_write_4(sc, bank, ring, ETR_RING_LBASE, lo);
	qat_etr_bank_ring_write_4(sc, bank, ring, ETR_RING_UBASE, hi);
}

static inline void
qat_arb_ringsrvarben_write_4(struct qat_softc *sc, int index, uint32_t value)
{

	qat_etr_write_4(sc, ARB_RINGSRVARBEN_OFFSET +
	    (ARB_REG_SLOT * index), value);
}

static inline void
qat_arb_sarconfig_write_4(struct qat_softc *sc, int index, uint32_t value)
{

	qat_etr_write_4(sc, ARB_OFFSET +
	    (ARB_REG_SIZE * index), value);
}

static inline void
qat_arb_wrk_2_ser_map_write_4(struct qat_softc *sc, int index, uint32_t value)
{

	qat_etr_write_4(sc, ARB_OFFSET + ARB_WRK_2_SER_MAP_OFFSET +
	    (ARB_REG_SIZE * index), value);
}

void *		qat_alloc_mem(size_t);
void		qat_free_mem(void *);
void		qat_free_dmamem(struct qat_softc *, struct qat_dmamem *);
int		qat_alloc_dmamem(struct qat_softc *, struct qat_dmamem *, int,
		    bus_size_t, bus_size_t);
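/*
 * Hypothetical usage sketch for the DMA helpers above (the argument
 * meanings are assumptions, not spelled out in this header): if the int
 * argument is the number of DMA segments and the two bus_size_t arguments
 * are the size and the alignment, a caller might do:
 *
 *	struct qat_dmamem dma;
 *
 *	if (qat_alloc_dmamem(sc, &dma, 1, PAGE_SIZE, PAGE_SIZE) == 0) {
 *		... use dma.qdm_dma_vaddr (mapped KVA) and
 *		    dma.qdm_dma_seg.ds_addr (bus address) ...
 *		qat_free_dmamem(sc, &dma);
 *	}
 */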

int		qat_etr_setup_ring(struct qat_softc *, int, uint32_t, uint32_t,
		    uint32_t, qat_cb_t, void *, const char *,
		    struct qat_ring **);
int		qat_etr_put_msg(struct qat_softc *, struct qat_ring *,
		    uint32_t *);

void		qat_memcpy_htobe64(void *, const void *, size_t);
void		qat_memcpy_htobe32(void *, const void *, size_t);
void		qat_memcpy_htobe(void *, const void *, size_t, uint32_t);
void		qat_crypto_gmac_precompute(const struct qat_crypto_desc *,
		    const uint8_t *key, int klen,
		    const struct qat_sym_hash_def *, uint8_t *);
void		qat_crypto_hmac_precompute(const struct qat_crypto_desc *,
		    const uint8_t *, int, const struct qat_sym_hash_def *,
		    uint8_t *, uint8_t *);
uint16_t	qat_crypto_load_cipher_session(const struct qat_crypto_desc *,
		    const struct qat_session *);
uint16_t	qat_crypto_load_auth_session(const struct qat_crypto_desc *,
		    const struct qat_session *,
		    struct qat_sym_hash_def const **);

#endif