xref: /freebsd/sys/dev/qat/qat_common/qat_uclo.c (revision d5b0e70f7e04d971691517ce1304d86a1e367e2e)
1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /* Copyright(c) 2007-2022 Intel Corporation */
3 /* $FreeBSD$ */
4 #include "qat_freebsd.h"
5 #include "adf_cfg.h"
6 #include "adf_common_drv.h"
7 #include "adf_accel_devices.h"
8 #include "icp_qat_uclo.h"
9 #include "icp_qat_fw.h"
10 #include "icp_qat_fw_init_admin.h"
11 #include "adf_cfg_strings.h"
12 #include "adf_transport_access_macros.h"
13 #include "adf_transport_internal.h"
14 #include <sys/ctype.h>
15 #include <sys/kernel.h>
16 #include <linux/delay.h>
20 #include "icp_qat_hal.h"
21 #include "icp_qat_fw_loader_handle.h"
22 
23 #define UWORD_CPYBUF_SIZE 1024
24 #define INVLD_UWORD 0xffffffffffull
25 #define PID_MINOR_REV 0xf
26 #define PID_MAJOR_REV (0xf << 4)
27 #define MAX_UINT32_VAL 0xfffffffful
28 
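/*
 * Bind the micro-code image image_num to the next free slice of AE 'ae':
 * record the assigned context mask, note whether the image wants shared
 * (coalesced) ustore and size the effective ustore accordingly, then
 * allocate the region/page pair that describes the slice.
 */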
29 static int
30 qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
31 		      unsigned int ae,
32 		      unsigned int image_num)
33 {
34 	struct icp_qat_uclo_aedata *ae_data;
35 	struct icp_qat_uclo_encapme *encap_image;
36 	struct icp_qat_uclo_page *page = NULL;
37 	struct icp_qat_uclo_aeslice *ae_slice = NULL;
38 
39 	ae_data = &obj_handle->ae_data[ae];
40 	encap_image = &obj_handle->ae_uimage[image_num];
41 	ae_slice = &ae_data->ae_slices[ae_data->slice_num];
42 	ae_slice->encap_image = encap_image;
43 
44 	if (encap_image->img_ptr) {
45 		ae_slice->ctx_mask_assigned =
46 		    encap_image->img_ptr->ctx_assigned;
47 		ae_data->shareable_ustore =
48 		    ICP_QAT_SHARED_USTORE_MODE(encap_image->img_ptr->ae_mode);
49 		ae_data->eff_ustore_size = ae_data->shareable_ustore ?
50 		    (obj_handle->ustore_phy_size << 1) :
51 		    obj_handle->ustore_phy_size;
52 	} else {
53 		ae_slice->ctx_mask_assigned = 0;
54 	}
55 	ae_slice->region =
56 	    malloc(sizeof(*ae_slice->region), M_QAT, M_WAITOK | M_ZERO);
57 	ae_slice->page =
58 	    malloc(sizeof(*ae_slice->page), M_QAT, M_WAITOK | M_ZERO);
59 	page = ae_slice->page;
60 	page->encap_page = encap_image->page;
61 	ae_slice->page->region = ae_slice->region;
62 	ae_data->slice_num++;
63 	return 0;
64 }
65 
66 static int
67 qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
68 {
69 	unsigned int i;
70 
71 	if (!ae_data) {
72 		pr_err("QAT: bad argument, ae_data is NULL\n");
73 		return EINVAL;
74 	}
75 
76 	for (i = 0; i < ae_data->slice_num; i++) {
77 		free(ae_data->ae_slices[i].region, M_QAT);
78 		ae_data->ae_slices[i].region = NULL;
79 		free(ae_data->ae_slices[i].page, M_QAT);
80 		ae_data->ae_slices[i].page = NULL;
81 	}
82 	return 0;
83 }
84 
85 static char *
86 qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
87 		    unsigned int str_offset)
88 {
89 	if (!str_table->table_len || str_offset > str_table->table_len)
90 		return NULL;
91 	return (char *)(((uintptr_t)(str_table->strings)) + str_offset);
92 }
93 
94 static int
95 qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr)
96 {
97 	int maj = hdr->maj_ver & 0xff;
98 	int min = hdr->min_ver & 0xff;
99 
100 	if (hdr->file_id != ICP_QAT_UOF_FID) {
101 		pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
102 		return EINVAL;
103 	}
104 	if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
105 		pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
106 		       maj,
107 		       min);
108 		return EINVAL;
109 	}
110 	return 0;
111 }
112 
113 static int
114 qat_uclo_check_suof_format(const struct icp_qat_suof_filehdr *suof_hdr)
115 {
116 	int maj = suof_hdr->maj_ver & 0xff;
117 	int min = suof_hdr->min_ver & 0xff;
118 
119 	if (suof_hdr->file_id != ICP_QAT_SUOF_FID) {
120 		pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id);
121 		return EINVAL;
122 	}
123 	if (suof_hdr->fw_type != 0) {
124 		pr_err("QAT: unsupported firmware type\n");
125 		return EINVAL;
126 	}
127 	if (suof_hdr->num_chunks <= 0x1) {
128 		pr_err("QAT: SUOF chunk count is incorrect\n");
129 		return EINVAL;
130 	}
131 	if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) {
132 		pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n",
133 		       maj,
134 		       min);
135 		return EINVAL;
136 	}
137 	return 0;
138 }
139 
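/*
 * Copy num_in_bytes of MMP data into on-chip SRAM at 'addr', one 32-bit
 * word at a time.  The loop decrements by 4, so the caller is expected
 * to pass a multiple of 4 bytes; sizes larger than the SRAM are rejected
 * up front.
 */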
140 static int
141 qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
142 			  unsigned int addr,
143 			  const unsigned int *val,
144 			  unsigned int num_in_bytes)
145 {
146 	unsigned int outval;
147 	const unsigned char *ptr = (const unsigned char *)val;
148 
149 	if (num_in_bytes > handle->hal_sram_size) {
150 		pr_err("QAT: error, mmp size overflow %d\n", num_in_bytes);
151 		return EINVAL;
152 	}
153 	while (num_in_bytes) {
154 		memcpy(&outval, ptr, 4);
155 		SRAM_WRITE(handle, addr, outval);
156 		num_in_bytes -= 4;
157 		ptr += 4;
158 		addr += 4;
159 	}
160 	return 0;
161 }
162 
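/*
 * Write num_in_bytes (again assumed to be a multiple of 4) into an AE's
 * umem.  The byte address is converted to a micro-word address before
 * handing each 32-bit value to qat_hal_wr_umem().
 */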
163 static void
164 qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
165 			  unsigned char ae,
166 			  unsigned int addr,
167 			  unsigned int *val,
168 			  unsigned int num_in_bytes)
169 {
170 	unsigned int outval;
171 	unsigned char *ptr = (unsigned char *)val;
172 
173 	addr >>= 0x2; /* convert to uword address */
174 
175 	while (num_in_bytes) {
176 		memcpy(&outval, ptr, 4);
177 		qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
178 		num_in_bytes -= 4;
179 		ptr += 4;
180 	}
181 }
182 
183 static void
184 qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle,
185 		       unsigned char ae,
186 		       struct icp_qat_uof_batch_init *umem_init_header)
187 {
188 	struct icp_qat_uof_batch_init *umem_init;
189 
190 	if (!umem_init_header)
191 		return;
192 	umem_init = umem_init_header->next;
193 	while (umem_init) {
194 		unsigned int addr, *value, size;
195 
196 		ae = umem_init->ae;
197 		addr = umem_init->addr;
198 		value = umem_init->value;
199 		size = umem_init->size;
200 		qat_uclo_wr_umem_by_words(handle, ae, addr, value, size);
201 		umem_init = umem_init->next;
202 	}
203 }
204 
205 static void
206 qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
207 				 struct icp_qat_uof_batch_init **base)
208 {
209 	struct icp_qat_uof_batch_init *umem_init;
210 
211 	umem_init = *base;
212 	while (umem_init) {
213 		struct icp_qat_uof_batch_init *pre;
214 
215 		pre = umem_init;
216 		umem_init = umem_init->next;
217 		free(pre, M_QAT);
218 	}
219 	*base = NULL;
220 }
221 
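/*
 * Parse the leading decimal digits of 'str' into *num, e.g. "23abc"
 * yields 23.  Returns EFAULT if the conversion fails or the value does
 * not fit in 32 bits.
 */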
222 static int
223 qat_uclo_parse_num(char *str, unsigned int *num)
224 {
225 	char buf[16] = { 0 };
226 	unsigned long ae = 0;
227 	int i;
228 
229 	strncpy(buf, str, 15);
230 	for (i = 0; i < 16; i++) {
231 		if (!isdigit(buf[i])) {
232 			buf[i] = '\0';
233 			break;
234 		}
235 	}
236 	if (compat_strtoul(buf, 10, &ae))
237 		return EFAULT;
238 
239 	if (ae > MAX_UINT32_VAL)
240 		return EFAULT;
241 
242 	*num = (unsigned int)ae;
243 	return 0;
244 }
245 
246 static int
247 qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
248 			  struct icp_qat_uof_initmem *init_mem,
249 			  unsigned int size_range,
250 			  unsigned int *ae)
251 {
252 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
253 	char *str;
254 
255 	if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
256 		pr_err("QAT: initmem is out of range\n");
257 		return EINVAL;
258 	}
259 	if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
260 		pr_err("QAT: Memory scope for init_mem error\n");
261 		return EINVAL;
262 	}
263 	str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
264 	if (!str) {
265 		pr_err("QAT: AE name assigned in UOF init table is NULL\n");
266 		return EINVAL;
267 	}
268 	if (qat_uclo_parse_num(str, ae)) {
269 		pr_err("QAT: Parse num for AE number failed\n");
270 		return EINVAL;
271 	}
272 	if (*ae >= ICP_QAT_UCLO_MAX_AE) {
273 		pr_err("QAT: ae %d out of range\n", *ae);
274 		return EINVAL;
275 	}
276 	return 0;
277 }
278 
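/*
 * Append one 4-byte batch-init node per memory-variable attribute of
 * 'init_mem' to the per-AE list rooted at *init_tab_base, allocating the
 * list head on first use.  The head's size field accumulates the number
 * of micro-instructions the batch write will need.
 */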
279 static int
280 qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle *handle,
281 				struct icp_qat_uof_initmem *init_mem,
282 				unsigned int ae,
283 				struct icp_qat_uof_batch_init **init_tab_base)
284 {
285 	struct icp_qat_uof_batch_init *init_header, *tail;
286 	struct icp_qat_uof_batch_init *mem_init, *tail_old;
287 	struct icp_qat_uof_memvar_attr *mem_val_attr;
288 	unsigned int i = 0;
289 
290 	mem_val_attr =
291 	    (struct icp_qat_uof_memvar_attr *)((uintptr_t)init_mem +
292 					       sizeof(
293 						   struct icp_qat_uof_initmem));
294 
295 	init_header = *init_tab_base;
296 	if (!init_header) {
297 		init_header =
298 		    malloc(sizeof(*init_header), M_QAT, M_WAITOK | M_ZERO);
299 		init_header->size = 1;
300 		*init_tab_base = init_header;
301 	}
302 	tail_old = init_header;
303 	while (tail_old->next)
304 		tail_old = tail_old->next;
305 	tail = tail_old;
306 	for (i = 0; i < init_mem->val_attr_num; i++) {
307 		mem_init = malloc(sizeof(*mem_init), M_QAT, M_WAITOK | M_ZERO);
308 		mem_init->ae = ae;
309 		mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte;
310 		mem_init->value = &mem_val_attr->value;
311 		mem_init->size = 4;
312 		mem_init->next = NULL;
313 		tail->next = mem_init;
314 		tail = mem_init;
315 		init_header->size += qat_hal_get_ins_num();
316 		mem_val_attr++;
317 	}
318 	return 0;
319 }
320 
321 static int
322 qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
323 		       struct icp_qat_uof_initmem *init_mem)
324 {
325 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
326 	unsigned int ae;
327 
328 	if (qat_uclo_fetch_initmem_ae(
329 		handle, init_mem, ICP_QAT_UCLO_MAX_LMEM_REG, &ae))
330 		return EINVAL;
331 	if (qat_uclo_create_batch_init_list(
332 		handle, init_mem, ae, &obj_handle->lm_init_tab[ae]))
333 		return EINVAL;
334 	return 0;
335 }
336 
337 static int
338 qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
339 		       struct icp_qat_uof_initmem *init_mem)
340 {
341 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
342 	unsigned int ae, ustore_size, uaddr, i;
343 	struct icp_qat_uclo_aedata *aed;
344 
345 	ustore_size = obj_handle->ustore_phy_size;
346 	if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
347 		return EINVAL;
348 	if (qat_uclo_create_batch_init_list(
349 		handle, init_mem, ae, &obj_handle->umem_init_tab[ae]))
350 		return EINVAL;
351 	/* set the highest ustore address referenced */
352 	uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
353 	aed = &obj_handle->ae_data[ae];
354 	for (i = 0; i < aed->slice_num; i++) {
355 		if (aed->ae_slices[i].encap_image->uwords_num < uaddr)
356 			aed->ae_slices[i].encap_image->uwords_num = uaddr;
357 	}
358 	return 0;
359 }
360 
361 #define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000
362 static int
363 qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
364 			struct icp_qat_uof_initmem *init_mem)
365 {
366 	switch (init_mem->region) {
367 	case ICP_QAT_UOF_LMEM_REGION:
368 		if (qat_uclo_init_lmem_seg(handle, init_mem))
369 			return EINVAL;
370 		break;
371 	case ICP_QAT_UOF_UMEM_REGION:
372 		if (qat_uclo_init_umem_seg(handle, init_mem))
373 			return EINVAL;
374 		break;
375 	default:
376 		pr_err("QAT: initmem region error. region type=0x%x\n",
377 		       init_mem->region);
378 		return EINVAL;
379 	}
380 	return 0;
381 }
382 
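/*
 * Pre-fill the unused part of each assigned AE's ustore with the image's
 * fill pattern.  For shareable (coalesced) ustore the fill goes through
 * the coalesce helper and both neighbour AEs are marked as initialized,
 * so the pattern is not written twice.
 */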
383 static int
384 qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
385 		     struct icp_qat_uclo_encapme *image)
386 {
387 	unsigned int i;
388 	struct icp_qat_uclo_encap_page *page;
389 	struct icp_qat_uof_image *uof_image;
390 	unsigned char ae = 0;
391 	unsigned char neigh_ae;
392 	unsigned int ustore_size;
393 	unsigned int patt_pos;
394 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
395 	uint64_t *fill_data;
396 	static unsigned int init[32] = { 0 };
397 	unsigned long ae_mask = handle->hal_handle->ae_mask;
398 
399 	uof_image = image->img_ptr;
400 	/*if shared CS mode, the ustore size should be 2*ustore_phy_size*/
401 	fill_data = malloc(obj_handle->ustore_phy_size * 2 * sizeof(uint64_t),
402 			   M_QAT,
403 			   M_WAITOK | M_ZERO);
404 	for (i = 0; i < obj_handle->ustore_phy_size * 2; i++)
405 		memcpy(&fill_data[i],
406 		       &uof_image->fill_pattern,
407 		       sizeof(uint64_t));
408 	page = image->page;
409 
410 	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
411 	{
412 		unsigned long cfg_ae_mask = handle->cfg_ae_mask;
413 		unsigned long ae_assigned = uof_image->ae_assigned;
414 
415 		if (!test_bit(ae, &cfg_ae_mask))
416 			continue;
417 
418 		if (!test_bit(ae, &ae_assigned))
419 			continue;
420 
421 		if (obj_handle->ae_data[ae].shareable_ustore && (ae & 1)) {
422 			qat_hal_get_scs_neigh_ae(ae, &neigh_ae);
423 
424 			if (test_bit(neigh_ae, &ae_assigned))
425 				continue;
426 		}
427 
428 		ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
429 		patt_pos = page->beg_addr_p + page->micro_words_num;
430 		if (obj_handle->ae_data[ae].shareable_ustore) {
431 			qat_hal_get_scs_neigh_ae(ae, &neigh_ae);
432 			if (init[ae] == 0 && page->beg_addr_p != 0) {
433 				qat_hal_wr_coalesce_uwords(handle,
434 							   (unsigned char)ae,
435 							   0,
436 							   page->beg_addr_p,
437 							   &fill_data[0]);
438 			}
439 			qat_hal_wr_coalesce_uwords(
440 			    handle,
441 			    (unsigned char)ae,
442 			    patt_pos,
443 			    ustore_size - patt_pos,
444 			    &fill_data[page->beg_addr_p]);
445 			init[ae] = 1;
446 			init[neigh_ae] = 1;
447 		} else {
448 			qat_hal_wr_uwords(handle,
449 					  (unsigned char)ae,
450 					  0,
451 					  page->beg_addr_p,
452 					  &fill_data[0]);
453 			qat_hal_wr_uwords(handle,
454 					  (unsigned char)ae,
455 					  patt_pos,
456 					  ustore_size - patt_pos + 1,
457 					  &fill_data[page->beg_addr_p]);
458 		}
459 	}
460 	free(fill_data, M_QAT);
461 	return 0;
462 }
463 
464 static int
465 qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
466 {
467 	int i;
468 	int ae = 0;
469 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
470 	struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;
471 	unsigned long ae_mask = handle->hal_handle->ae_mask;
472 
473 	for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
474 		if (initmem->num_in_bytes) {
475 			if (qat_uclo_init_ae_memory(handle, initmem))
476 				return EINVAL;
477 		}
478 		initmem =
479 		    (struct icp_qat_uof_initmem
480 			 *)((uintptr_t)((uintptr_t)initmem +
481 					sizeof(struct icp_qat_uof_initmem)) +
482 			    (sizeof(struct icp_qat_uof_memvar_attr) *
483 			     initmem->val_attr_num));
484 	}
485 
486 	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
487 	{
488 		if (qat_hal_batch_wr_lm(handle,
489 					ae,
490 					obj_handle->lm_init_tab[ae])) {
491 			pr_err("QAT: fail to batch init lmem for AE %d\n", ae);
492 			return EINVAL;
493 		}
494 		qat_uclo_cleanup_batch_init_list(handle,
495 						 &obj_handle->lm_init_tab[ae]);
496 		qat_uclo_batch_wr_umem(handle,
497 				       ae,
498 				       obj_handle->umem_init_tab[ae]);
499 		qat_uclo_cleanup_batch_init_list(
500 		    handle, &obj_handle->umem_init_tab[ae]);
501 	}
502 	return 0;
503 }
504 
505 static void *
506 qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
507 		    char *chunk_id,
508 		    void *cur)
509 {
510 	int i;
511 	struct icp_qat_uof_chunkhdr *chunk_hdr =
512 	    (struct icp_qat_uof_chunkhdr *)((uintptr_t)obj_hdr +
513 					    sizeof(struct icp_qat_uof_objhdr));
514 
515 	for (i = 0; i < obj_hdr->num_chunks; i++) {
516 		if ((cur < (void *)&chunk_hdr[i]) &&
517 		    !strncmp(chunk_hdr[i].chunk_id,
518 			     chunk_id,
519 			     ICP_QAT_UOF_OBJID_LEN)) {
520 			return &chunk_hdr[i];
521 		}
522 	}
523 	return NULL;
524 }
525 
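/* One byte-step of a CRC-16/CCITT-style checksum (polynomial 0x1021). */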
526 static unsigned int
527 qat_uclo_calc_checksum(unsigned int reg, int ch)
528 {
529 	int i;
530 	unsigned int topbit = 1 << 0xF;
531 	unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch);
532 
533 	reg ^= inbyte << 0x8;
534 	for (i = 0; i < 0x8; i++) {
535 		if (reg & topbit)
536 			reg = (reg << 1) ^ 0x1021;
537 		else
538 			reg <<= 1;
539 	}
540 	return reg & 0xFFFF;
541 }
542 
543 static unsigned int
544 qat_uclo_calc_str_checksum(const char *ptr, int num)
545 {
546 	unsigned int chksum = 0;
547 
548 	if (ptr)
549 		while (num--)
550 			chksum = qat_uclo_calc_checksum(chksum, *ptr++);
551 	return chksum;
552 }
553 
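/*
 * Locate the file chunk named 'chunk_id', verify its checksum and return
 * a freshly allocated object header describing it, or NULL if the chunk
 * is missing or corrupt.
 */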
554 static struct icp_qat_uclo_objhdr *
555 qat_uclo_map_chunk(char *buf,
556 		   struct icp_qat_uof_filehdr *file_hdr,
557 		   char *chunk_id)
558 {
559 	struct icp_qat_uof_filechunkhdr *file_chunk;
560 	struct icp_qat_uclo_objhdr *obj_hdr;
561 	char *chunk;
562 	int i;
563 
564 	file_chunk = (struct icp_qat_uof_filechunkhdr
565 			  *)(buf + sizeof(struct icp_qat_uof_filehdr));
566 	for (i = 0; i < file_hdr->num_chunks; i++) {
567 		if (!strncmp(file_chunk->chunk_id,
568 			     chunk_id,
569 			     ICP_QAT_UOF_OBJID_LEN)) {
570 			chunk = buf + file_chunk->offset;
571 			if (file_chunk->checksum !=
572 			    qat_uclo_calc_str_checksum(chunk, file_chunk->size))
573 				break;
574 			obj_hdr =
575 			    malloc(sizeof(*obj_hdr), M_QAT, M_WAITOK | M_ZERO);
576 			obj_hdr->file_buff = chunk;
577 			obj_hdr->checksum = file_chunk->checksum;
578 			obj_hdr->size = file_chunk->size;
579 			return obj_hdr;
580 		}
581 		file_chunk++;
582 	}
583 	return NULL;
584 }
585 
586 static unsigned int
587 qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
588 			    struct icp_qat_uof_image *image)
589 {
590 	struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab;
591 	struct icp_qat_uof_objtable *neigh_reg_tab;
592 	struct icp_qat_uof_code_page *code_page;
593 
594 	code_page =
595 	    (struct icp_qat_uof_code_page *)((char *)image +
596 					     sizeof(struct icp_qat_uof_image));
597 	uc_var_tab =
598 	    (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
599 					    code_page->uc_var_tab_offset);
600 	imp_var_tab =
601 	    (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
602 					    code_page->imp_var_tab_offset);
603 	imp_expr_tab =
604 	    (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
605 					    code_page->imp_expr_tab_offset);
606 	if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
607 	    imp_expr_tab->entry_num) {
608 		pr_err("QAT: UOF can't contain imported variable to be parsed\n");
609 		return EINVAL;
610 	}
611 	neigh_reg_tab =
612 	    (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
613 					    code_page->neigh_reg_tab_offset);
614 	if (neigh_reg_tab->entry_num) {
615 		pr_err("QAT: UOF can't contain neighbor register table\n");
616 		return EINVAL;
617 	}
618 	if (image->numpages > 1) {
619 		pr_err("QAT: UOF can't contain multiple pages\n");
620 		return EINVAL;
621 	}
622 	if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
623 		pr_err("QAT: UOF can't use reloadable feature\n");
624 		return EFAULT;
625 	}
626 	return 0;
627 }
628 
629 static void
630 qat_uclo_map_image_page(struct icp_qat_uof_encap_obj *encap_uof_obj,
631 			struct icp_qat_uof_image *img,
632 			struct icp_qat_uclo_encap_page *page)
633 {
634 	struct icp_qat_uof_code_page *code_page;
635 	struct icp_qat_uof_code_area *code_area;
636 	struct icp_qat_uof_objtable *uword_block_tab;
637 	struct icp_qat_uof_uword_block *uwblock;
638 	int i;
639 
640 	code_page =
641 	    (struct icp_qat_uof_code_page *)((char *)img +
642 					     sizeof(struct icp_qat_uof_image));
643 	page->def_page = code_page->def_page;
644 	page->page_region = code_page->page_region;
645 	page->beg_addr_v = code_page->beg_addr_v;
646 	page->beg_addr_p = code_page->beg_addr_p;
647 	code_area =
648 	    (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof +
649 					     code_page->code_area_offset);
650 	page->micro_words_num = code_area->micro_words_num;
651 	uword_block_tab =
652 	    (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
653 					    code_area->uword_block_tab);
654 	page->uwblock_num = uword_block_tab->entry_num;
655 	uwblock = (struct icp_qat_uof_uword_block
656 		       *)((char *)uword_block_tab +
657 			  sizeof(struct icp_qat_uof_objtable));
658 	page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
659 	for (i = 0; i < uword_block_tab->entry_num; i++)
660 		page->uwblock[i].micro_words =
661 		    (uintptr_t)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
662 }
663 
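/*
 * Walk the UOF image chunks (at most max_image of them), mapping each
 * image's register, init-regsym and sbreak tables along with its single
 * code page.  Returns the number of images mapped, or 0 on error.
 */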
664 static int
665 qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
666 		    struct icp_qat_uclo_encapme *ae_uimage,
667 		    int max_image)
668 {
669 	int i, j;
670 	struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
671 	struct icp_qat_uof_image *image;
672 	struct icp_qat_uof_objtable *ae_regtab;
673 	struct icp_qat_uof_objtable *init_reg_sym_tab;
674 	struct icp_qat_uof_objtable *sbreak_tab;
675 	struct icp_qat_uof_encap_obj *encap_uof_obj =
676 	    &obj_handle->encap_uof_obj;
677 
678 	for (j = 0; j < max_image; j++) {
679 		chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
680 						ICP_QAT_UOF_IMAG,
681 						chunk_hdr);
682 		if (!chunk_hdr)
683 			break;
684 		image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof +
685 						     chunk_hdr->offset);
686 		ae_regtab =
687 		    (struct icp_qat_uof_objtable *)(image->reg_tab_offset +
688 						    obj_handle->obj_hdr
689 							->file_buff);
690 		ae_uimage[j].ae_reg_num = ae_regtab->entry_num;
691 		ae_uimage[j].ae_reg =
692 		    (struct icp_qat_uof_ae_reg
693 			 *)(((char *)ae_regtab) +
694 			    sizeof(struct icp_qat_uof_objtable));
695 		init_reg_sym_tab =
696 		    (struct icp_qat_uof_objtable *)(image->init_reg_sym_tab +
697 						    obj_handle->obj_hdr
698 							->file_buff);
699 		ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num;
700 		ae_uimage[j].init_regsym =
701 		    (struct icp_qat_uof_init_regsym
702 			 *)(((char *)init_reg_sym_tab) +
703 			    sizeof(struct icp_qat_uof_objtable));
704 		sbreak_tab = (struct icp_qat_uof_objtable *)(image->sbreak_tab +
705 							     obj_handle->obj_hdr
706 								 ->file_buff);
707 		ae_uimage[j].sbreak_num = sbreak_tab->entry_num;
708 		ae_uimage[j].sbreak =
709 		    (struct icp_qat_uof_sbreak
710 			 *)(((char *)sbreak_tab) +
711 			    sizeof(struct icp_qat_uof_objtable));
712 		ae_uimage[j].img_ptr = image;
713 		if (qat_uclo_check_image_compat(encap_uof_obj, image))
714 			goto out_err;
715 		ae_uimage[j].page =
716 		    malloc(sizeof(struct icp_qat_uclo_encap_page),
717 			   M_QAT,
718 			   M_WAITOK | M_ZERO);
719 		qat_uclo_map_image_page(encap_uof_obj,
720 					image,
721 					ae_uimage[j].page);
722 	}
723 	return j;
724 out_err:
725 	for (i = 0; i < j; i++)
726 		free(ae_uimage[i].page, M_QAT);
727 	return 0;
728 }
729 
730 static int
731 qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
732 {
733 	int i;
734 	int ae = 0;
735 	unsigned long ae_mask = handle->hal_handle->ae_mask;
736 	unsigned long cfg_ae_mask = handle->cfg_ae_mask;
737 	int mflag = 0;
738 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
739 
740 	for_each_set_bit(ae, &ae_mask, max_ae)
741 	{
742 		if (!test_bit(ae, &cfg_ae_mask))
743 			continue;
744 
745 		for (i = 0; i < obj_handle->uimage_num; i++) {
746 			unsigned long ae_assigned =
747 			    obj_handle->ae_uimage[i].img_ptr->ae_assigned;
748 			if (!test_bit(ae, &ae_assigned))
749 				continue;
750 			mflag = 1;
751 			if (qat_uclo_init_ae_data(obj_handle, ae, i))
752 				return EINVAL;
753 		}
754 	}
755 	if (!mflag) {
756 		pr_err("QAT: uimage uses AE not set\n");
757 		return EINVAL;
758 	}
759 	return 0;
760 }
761 
762 static struct icp_qat_uof_strtable *
763 qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
764 		       char *tab_name,
765 		       struct icp_qat_uof_strtable *str_table)
766 {
767 	struct icp_qat_uof_chunkhdr *chunk_hdr;
768 
769 	chunk_hdr =
770 	    qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)obj_hdr->file_buff,
771 				tab_name,
772 				NULL);
773 	if (chunk_hdr) {
774 		int hdr_size;
775 
776 		memcpy(&str_table->table_len,
777 		       obj_hdr->file_buff + chunk_hdr->offset,
778 		       sizeof(str_table->table_len));
779 		hdr_size = (char *)&str_table->strings - (char *)str_table;
780 		str_table->strings = (uintptr_t)obj_hdr->file_buff +
781 		    chunk_hdr->offset + hdr_size;
782 		return str_table;
783 	}
784 	return NULL;
785 }
786 
787 static void
788 qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
789 			   struct icp_qat_uclo_init_mem_table *init_mem_tab)
790 {
791 	struct icp_qat_uof_chunkhdr *chunk_hdr;
792 
793 	chunk_hdr =
794 	    qat_uclo_find_chunk(encap_uof_obj->obj_hdr, ICP_QAT_UOF_IMEM, NULL);
795 	if (chunk_hdr) {
796 		memmove(&init_mem_tab->entry_num,
797 			encap_uof_obj->beg_uof + chunk_hdr->offset,
798 			sizeof(unsigned int));
799 		init_mem_tab->init_mem =
800 		    (struct icp_qat_uof_initmem *)(encap_uof_obj->beg_uof +
801 						   chunk_hdr->offset +
802 						   sizeof(unsigned int));
803 	}
804 }
805 
806 static unsigned int
807 qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
808 {
809 	switch (pci_get_device(GET_DEV(handle->accel_dev))) {
810 	case ADF_DH895XCC_PCI_DEVICE_ID:
811 		return ICP_QAT_AC_895XCC_DEV_TYPE;
812 	case ADF_C62X_PCI_DEVICE_ID:
813 		return ICP_QAT_AC_C62X_DEV_TYPE;
814 	case ADF_C3XXX_PCI_DEVICE_ID:
815 		return ICP_QAT_AC_C3XXX_DEV_TYPE;
816 	case ADF_200XX_PCI_DEVICE_ID:
817 		return ICP_QAT_AC_200XX_DEV_TYPE;
818 	case ADF_C4XXX_PCI_DEVICE_ID:
819 		return ICP_QAT_AC_C4XXX_DEV_TYPE;
820 	default:
821 		pr_err("QAT: unsupported device 0x%x\n",
822 		       pci_get_device(GET_DEV(handle->accel_dev)));
823 		return 0;
824 	}
825 }
826 
827 static int
828 qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
829 {
830 	unsigned int maj_ver, prod_type = obj_handle->prod_type;
831 
832 	if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) {
833 		pr_err("QAT: UOF type 0x%x doesn't match platform 0x%x\n",
834 		       obj_handle->encap_uof_obj.obj_hdr->ac_dev_type,
835 		       prod_type);
836 		return EINVAL;
837 	}
838 	maj_ver = obj_handle->prod_rev & 0xff;
839 	if (obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver ||
840 	    obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver) {
841 		pr_err("QAT: UOF maj_ver 0x%x out of range\n", maj_ver);
842 		return EINVAL;
843 	}
844 	return 0;
845 }
846 
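/*
 * Dispatch a single register initialization to the matching HAL helper.
 * Absolute register types are per-AE rather than per-context, so the
 * context mask is forced to 0 for them.
 */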
847 static int
848 qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
849 		  unsigned char ae,
850 		  unsigned char ctx_mask,
851 		  enum icp_qat_uof_regtype reg_type,
852 		  unsigned short reg_addr,
853 		  unsigned int value)
854 {
855 	switch (reg_type) {
856 	case ICP_GPA_ABS:
857 	case ICP_GPB_ABS:
858 		ctx_mask = 0;
859 		return qat_hal_init_gpr(
860 		    handle, ae, ctx_mask, reg_type, reg_addr, value);
861 	case ICP_GPA_REL:
862 	case ICP_GPB_REL:
863 		return qat_hal_init_gpr(
864 		    handle, ae, ctx_mask, reg_type, reg_addr, value);
865 	case ICP_SR_ABS:
866 	case ICP_DR_ABS:
867 	case ICP_SR_RD_ABS:
868 	case ICP_DR_RD_ABS:
869 		ctx_mask = 0;
870 		return qat_hal_init_rd_xfer(
871 		    handle, ae, ctx_mask, reg_type, reg_addr, value);
872 	case ICP_SR_REL:
873 	case ICP_DR_REL:
874 	case ICP_SR_RD_REL:
875 	case ICP_DR_RD_REL:
876 		return qat_hal_init_rd_xfer(
877 		    handle, ae, ctx_mask, reg_type, reg_addr, value);
878 	case ICP_SR_WR_ABS:
879 	case ICP_DR_WR_ABS:
880 		ctx_mask = 0;
881 		return qat_hal_init_wr_xfer(
882 		    handle, ae, ctx_mask, reg_type, reg_addr, value);
883 	case ICP_SR_WR_REL:
884 	case ICP_DR_WR_REL:
885 		return qat_hal_init_wr_xfer(
886 		    handle, ae, ctx_mask, reg_type, reg_addr, value);
887 	case ICP_NEIGH_REL:
888 		return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
889 	default:
890 		pr_err("QAT: UOF uses unsupported reg type 0x%x\n", reg_type);
891 		return EFAULT;
892 	}
893 	return 0;
894 }
895 
896 static int
897 qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
898 		      unsigned int ae,
899 		      struct icp_qat_uclo_encapme *encap_ae)
900 {
901 	unsigned int i;
902 	unsigned char ctx_mask;
903 	struct icp_qat_uof_init_regsym *init_regsym;
904 
905 	if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) ==
906 	    ICP_QAT_UCLO_MAX_CTX)
907 		ctx_mask = 0xff;
908 	else
909 		ctx_mask = 0x55;
910 
911 	for (i = 0; i < encap_ae->init_regsym_num; i++) {
912 		unsigned int exp_res;
913 
914 		init_regsym = &encap_ae->init_regsym[i];
915 		exp_res = init_regsym->value;
916 		switch (init_regsym->init_type) {
917 		case ICP_QAT_UOF_INIT_REG:
918 			qat_uclo_init_reg(handle,
919 					  ae,
920 					  ctx_mask,
921 					  (enum icp_qat_uof_regtype)
922 					      init_regsym->reg_type,
923 					  (unsigned short)init_regsym->reg_addr,
924 					  exp_res);
925 			break;
926 		case ICP_QAT_UOF_INIT_REG_CTX:
927 			/* check if ctx is appropriate for the ctxMode */
928 			if (!((1 << init_regsym->ctx) & ctx_mask)) {
929 				pr_err("QAT: invalid ctx num = 0x%x\n",
930 				       init_regsym->ctx);
931 				return EINVAL;
932 			}
933 			qat_uclo_init_reg(
934 			    handle,
935 			    ae,
936 			    (unsigned char)(1 << init_regsym->ctx),
937 			    (enum icp_qat_uof_regtype)init_regsym->reg_type,
938 			    (unsigned short)init_regsym->reg_addr,
939 			    exp_res);
940 			break;
941 		case ICP_QAT_UOF_INIT_EXPR:
942 			pr_err("QAT: INIT_EXPR feature not supported\n");
943 			return EINVAL;
944 		case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
945 			pr_err("QAT: INIT_EXPR_ENDIAN_SWAP not supported\n");
946 			return EINVAL;
947 		default:
948 			break;
949 		}
950 	}
951 	return 0;
952 }
953 
954 static int
955 qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
956 {
957 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
958 	unsigned int s;
959 	unsigned int ae = 0;
960 	struct icp_qat_uclo_aedata *aed;
961 	unsigned long ae_mask = handle->hal_handle->ae_mask;
962 
963 	if (obj_handle->global_inited)
964 		return 0;
965 	if (obj_handle->init_mem_tab.entry_num) {
966 		if (qat_uclo_init_memory(handle)) {
967 			pr_err("QAT: memory initialization failed\n");
968 			return EINVAL;
969 		}
970 	}
971 
972 	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
973 	{
974 		aed = &obj_handle->ae_data[ae];
975 		for (s = 0; s < aed->slice_num; s++) {
976 			if (!aed->ae_slices[s].encap_image)
977 				continue;
978 			if (qat_uclo_init_reg_sym(
979 				handle, ae, aed->ae_slices[s].encap_image))
980 				return EINVAL;
981 		}
982 	}
983 	obj_handle->global_inited = 1;
984 	return 0;
985 }
986 
987 static int
988 qat_hal_set_modes(struct icp_qat_fw_loader_handle *handle,
989 		  struct icp_qat_uclo_objhandle *obj_handle,
990 		  unsigned char ae,
991 		  struct icp_qat_uof_image *uof_image)
992 {
993 	unsigned char nn_mode;
994 	char ae_mode = 0;
995 
996 	ae_mode = (char)ICP_QAT_CTX_MODE(uof_image->ae_mode);
997 	if (qat_hal_set_ae_ctx_mode(handle, ae, ae_mode)) {
998 		pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
999 		return EFAULT;
1000 	}
1001 
1002 	ae_mode = (char)ICP_QAT_SHARED_USTORE_MODE(uof_image->ae_mode);
1003 	qat_hal_set_ae_scs_mode(handle, ae, ae_mode);
1004 	nn_mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
1005 
1006 	if (qat_hal_set_ae_nn_mode(handle, ae, nn_mode)) {
1007 		pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
1008 		return EFAULT;
1009 	}
1010 	ae_mode = (char)ICP_QAT_LOC_MEM0_MODE(uof_image->ae_mode);
1011 	if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0, ae_mode)) {
1012 		pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
1013 		return EFAULT;
1014 	}
1015 	ae_mode = (char)ICP_QAT_LOC_MEM1_MODE(uof_image->ae_mode);
1016 	if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1, ae_mode)) {
1017 		pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
1018 		return EFAULT;
1019 	}
1020 	if (obj_handle->prod_type == ICP_QAT_AC_C4XXX_DEV_TYPE) {
1021 		ae_mode = (char)ICP_QAT_LOC_MEM2_MODE(uof_image->ae_mode);
1022 		if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM2, ae_mode)) {
1023 			pr_err("QAT: qat_hal_set_ae_lm_mode LMEM2 error\n");
1024 			return EFAULT;
1025 		}
1026 		ae_mode = (char)ICP_QAT_LOC_MEM3_MODE(uof_image->ae_mode);
1027 		if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM3, ae_mode)) {
1028 			pr_err("QAT: qat_hal_set_ae_lm_mode LMEM3 error\n");
1029 			return EFAULT;
1030 		}
1031 		ae_mode = (char)ICP_QAT_LOC_TINDEX_MODE(uof_image->ae_mode);
1032 		qat_hal_set_ae_tindex_mode(handle, ae, ae_mode);
1033 	}
1034 	return 0;
1035 }
1036 
1037 static int
1038 qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
1039 {
1040 	int error;
1041 	unsigned char s;
1042 	unsigned char ae = 0;
1043 	struct icp_qat_uof_image *uof_image;
1044 	struct icp_qat_uclo_aedata *ae_data;
1045 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1046 	unsigned long ae_mask = handle->hal_handle->ae_mask;
1047 
1048 	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
1049 	{
1050 		unsigned long cfg_ae_mask = handle->cfg_ae_mask;
1051 
1052 		if (!test_bit(ae, &cfg_ae_mask))
1053 			continue;
1054 
1055 		ae_data = &obj_handle->ae_data[ae];
1056 		for (s = 0; s < min_t(unsigned int,
1057 				      ae_data->slice_num,
1058 				      ICP_QAT_UCLO_MAX_CTX);
1059 		     s++) {
1060 			if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
1061 				continue;
1062 			uof_image = ae_data->ae_slices[s].encap_image->img_ptr;
1063 			error = qat_hal_set_modes(handle,
1064 						  obj_handle,
1065 						  ae,
1066 						  uof_image);
1067 			if (error)
1068 				return error;
1069 		}
1070 	}
1071 	return 0;
1072 }
1073 
1074 static void
1075 qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle)
1076 {
1077 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1078 	struct icp_qat_uclo_encapme *image;
1079 	int a;
1080 
1081 	for (a = 0; a < obj_handle->uimage_num; a++) {
1082 		image = &obj_handle->ae_uimage[a];
1083 		image->uwords_num =
1084 		    image->page->beg_addr_p + image->page->micro_words_num;
1085 	}
1086 }
1087 
1088 static int
1089 qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
1090 {
1091 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1092 	unsigned int ae;
1093 
1094 	obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
1095 	obj_handle->encap_uof_obj.obj_hdr =
1096 	    (struct icp_qat_uof_objhdr *)obj_handle->obj_hdr->file_buff;
1097 	obj_handle->uword_in_bytes = 6;
1098 	obj_handle->prod_type = qat_uclo_get_dev_type(handle);
1099 	obj_handle->prod_rev =
1100 	    PID_MAJOR_REV | (PID_MINOR_REV & handle->hal_handle->revision_id);
1101 	if (qat_uclo_check_uof_compat(obj_handle)) {
1102 		pr_err("QAT: UOF incompatible\n");
1103 		return EINVAL;
1104 	}
1105 	obj_handle->uword_buf = malloc(UWORD_CPYBUF_SIZE * sizeof(uint64_t),
1106 				       M_QAT,
1107 				       M_WAITOK | M_ZERO);
1108 	obj_handle->ustore_phy_size =
1109 	    (obj_handle->prod_type == ICP_QAT_AC_C4XXX_DEV_TYPE) ? 0x2000 :
1110 								   0x4000;
1111 	if (!obj_handle->obj_hdr->file_buff ||
1112 	    !qat_uclo_map_str_table(obj_handle->obj_hdr,
1113 				    ICP_QAT_UOF_STRT,
1114 				    &obj_handle->str_table)) {
1115 		pr_err("QAT: UOF has no effective images\n");
1116 		goto out_err;
1117 	}
1118 	obj_handle->uimage_num =
1119 	    qat_uclo_map_uimage(obj_handle,
1120 				obj_handle->ae_uimage,
1121 				ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX);
1122 	if (!obj_handle->uimage_num)
1123 		goto out_err;
1124 	if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
1125 		pr_err("QAT: Bad object\n");
1126 		goto out_check_uof_aemask_err;
1127 	}
1128 	qat_uclo_init_uword_num(handle);
1129 	qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj,
1130 				   &obj_handle->init_mem_tab);
1131 	if (qat_uclo_set_ae_mode(handle))
1132 		goto out_check_uof_aemask_err;
1133 	return 0;
1134 out_check_uof_aemask_err:
1135 	for (ae = 0; ae < obj_handle->uimage_num; ae++)
1136 		free(obj_handle->ae_uimage[ae].page, M_QAT);
1137 out_err:
1138 	free(obj_handle->uword_buf, M_QAT);
1139 	obj_handle->uword_buf = NULL;
1140 	return EFAULT;
1141 }
1142 
1143 static int
1144 qat_uclo_map_suof_file_hdr(const struct icp_qat_fw_loader_handle *handle,
1145 			   const struct icp_qat_suof_filehdr *suof_ptr,
1146 			   int suof_size)
1147 {
1148 	unsigned int check_sum = 0;
1149 	unsigned int min_ver_offset = 0;
1150 	struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
1151 
1152 	suof_handle->file_id = ICP_QAT_SUOF_FID;
1153 	suof_handle->suof_buf = (const char *)suof_ptr;
1154 	suof_handle->suof_size = suof_size;
1155 	min_ver_offset =
1156 	    suof_size - offsetof(struct icp_qat_suof_filehdr, min_ver);
1157 	check_sum = qat_uclo_calc_str_checksum((const char *)&suof_ptr->min_ver,
1158 					       min_ver_offset);
1159 	if (check_sum != suof_ptr->check_sum) {
1160 		pr_err("QAT: incorrect SUOF checksum\n");
1161 		return EINVAL;
1162 	}
1163 	suof_handle->check_sum = suof_ptr->check_sum;
1164 	suof_handle->min_ver = suof_ptr->min_ver;
1165 	suof_handle->maj_ver = suof_ptr->maj_ver;
1166 	suof_handle->fw_type = suof_ptr->fw_type;
1167 	return 0;
1168 }
1169 
1170 static void
1171 qat_uclo_map_simg(struct icp_qat_suof_handle *suof_handle,
1172 		  struct icp_qat_suof_img_hdr *suof_img_hdr,
1173 		  struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
1174 {
1175 	const struct icp_qat_simg_ae_mode *ae_mode;
1176 	struct icp_qat_suof_objhdr *suof_objhdr;
1177 
1178 	suof_img_hdr->simg_buf =
1179 	    (suof_handle->suof_buf + suof_chunk_hdr->offset +
1180 	     sizeof(*suof_objhdr));
1181 	suof_img_hdr->simg_len =
1182 	    ((struct icp_qat_suof_objhdr *)(uintptr_t)(suof_handle->suof_buf +
1183 						       suof_chunk_hdr->offset))
1184 		->img_length;
1185 
1186 	suof_img_hdr->css_header = suof_img_hdr->simg_buf;
1187 	suof_img_hdr->css_key =
1188 	    (suof_img_hdr->css_header + sizeof(struct icp_qat_css_hdr));
1189 	suof_img_hdr->css_signature = suof_img_hdr->css_key +
1190 	    ICP_QAT_CSS_FWSK_MODULUS_LEN + ICP_QAT_CSS_FWSK_EXPONENT_LEN;
1191 	suof_img_hdr->css_simg =
1192 	    suof_img_hdr->css_signature + ICP_QAT_CSS_SIGNATURE_LEN;
1193 
1194 	ae_mode = (const struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg);
1195 	suof_img_hdr->ae_mask = ae_mode->ae_mask;
1196 	suof_img_hdr->simg_name = (unsigned long)&ae_mode->simg_name;
1197 	suof_img_hdr->appmeta_data = (unsigned long)&ae_mode->appmeta_data;
1198 	suof_img_hdr->fw_type = ae_mode->fw_type;
1199 }
1200 
1201 static void
1202 qat_uclo_map_suof_symobjs(struct icp_qat_suof_handle *suof_handle,
1203 			  struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
1204 {
1205 	char **sym_str = (char **)&suof_handle->sym_str;
1206 	unsigned int *sym_size = &suof_handle->sym_size;
1207 	struct icp_qat_suof_strtable *str_table_obj;
1208 
1209 	*sym_size = *(unsigned int *)(uintptr_t)(suof_chunk_hdr->offset +
1210 						 suof_handle->suof_buf);
1211 	*sym_str =
1212 	    (char *)(uintptr_t)(suof_handle->suof_buf + suof_chunk_hdr->offset +
1213 				sizeof(str_table_obj->tab_length));
1214 }
1215 
1216 static int
1217 qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle,
1218 			   struct icp_qat_suof_img_hdr *img_hdr)
1219 {
1220 	const struct icp_qat_simg_ae_mode *img_ae_mode = NULL;
1221 	unsigned int prod_rev, maj_ver, prod_type;
1222 
1223 	prod_type = qat_uclo_get_dev_type(handle);
1224 	img_ae_mode = (const struct icp_qat_simg_ae_mode *)img_hdr->css_simg;
1225 	prod_rev =
1226 	    PID_MAJOR_REV | (PID_MINOR_REV & handle->hal_handle->revision_id);
1227 	if (img_ae_mode->dev_type != prod_type) {
1228 		pr_err("QAT: incompatible product type %x\n",
1229 		       img_ae_mode->dev_type);
1230 		return EINVAL;
1231 	}
1232 	maj_ver = prod_rev & 0xff;
1233 	if (maj_ver > img_ae_mode->devmax_ver ||
1234 	    maj_ver < img_ae_mode->devmin_ver) {
1235 		pr_err("QAT: incompatible device maj_ver 0x%x\n", maj_ver);
1236 		return EINVAL;
1237 	}
1238 	return 0;
1239 }
1240 
1241 static void
1242 qat_uclo_del_suof(struct icp_qat_fw_loader_handle *handle)
1243 {
1244 	struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
1245 
1246 	free(sobj_handle->img_table.simg_hdr, M_QAT);
1247 	sobj_handle->img_table.simg_hdr = NULL;
1248 	free(handle->sobj_handle, M_QAT);
1249 	handle->sobj_handle = NULL;
1250 }
1251 
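/*
 * If the image that targets AE0 is not already the last entry of the
 * image table, swap it with the last entry, presumably so that AE0's
 * image is loaded last.
 */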
1252 static void
1253 qat_uclo_tail_img(struct icp_qat_suof_img_hdr *suof_img_hdr,
1254 		  unsigned int img_id,
1255 		  unsigned int num_simgs)
1256 {
1257 	struct icp_qat_suof_img_hdr img_header;
1258 
1259 	if ((img_id != num_simgs - 1) && img_id != ICP_QAT_UCLO_MAX_AE) {
1260 		memcpy(&img_header,
1261 		       &suof_img_hdr[num_simgs - 1],
1262 		       sizeof(*suof_img_hdr));
1263 		memcpy(&suof_img_hdr[num_simgs - 1],
1264 		       &suof_img_hdr[img_id],
1265 		       sizeof(*suof_img_hdr));
1266 		memcpy(&suof_img_hdr[img_id],
1267 		       &img_header,
1268 		       sizeof(*suof_img_hdr));
1269 	}
1270 }
1271 
1272 static int
1273 qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
1274 		  const struct icp_qat_suof_filehdr *suof_ptr,
1275 		  int suof_size)
1276 {
1277 	struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
1278 	struct icp_qat_suof_chunk_hdr *suof_chunk_hdr = NULL;
1279 	struct icp_qat_suof_img_hdr *suof_img_hdr = NULL;
1280 	int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE;
1281 	unsigned int i = 0;
1282 	struct icp_qat_suof_img_hdr img_header;
1283 
1284 	if (!suof_ptr || suof_size == 0) {
1285 		pr_err("QAT: invalid SUOF pointer or size\n");
1286 		return EINVAL;
1287 	}
1288 	if (qat_uclo_check_suof_format(suof_ptr))
1289 		return EINVAL;
1290 	ret = qat_uclo_map_suof_file_hdr(handle, suof_ptr, suof_size);
1291 	if (ret)
1292 		return ret;
1293 	suof_chunk_hdr = (struct icp_qat_suof_chunk_hdr *)((uintptr_t)suof_ptr +
1294 							   sizeof(*suof_ptr));
1295 
1296 	qat_uclo_map_suof_symobjs(suof_handle, suof_chunk_hdr);
1297 	suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1;
1298 
1299 	if (suof_handle->img_table.num_simgs != 0) {
1300 		suof_img_hdr = malloc(suof_handle->img_table.num_simgs *
1301 					  sizeof(img_header),
1302 				      M_QAT,
1303 				      M_WAITOK | M_ZERO);
1304 		suof_handle->img_table.simg_hdr = suof_img_hdr;
1305 	}
1306 
1307 	for (i = 0; i < suof_handle->img_table.num_simgs; i++) {
1308 		qat_uclo_map_simg(handle->sobj_handle,
1309 				  &suof_img_hdr[i],
1310 				  &suof_chunk_hdr[1 + i]);
1311 		ret = qat_uclo_check_simg_compat(handle, &suof_img_hdr[i]);
1312 		if (ret)
1313 			return ret;
1314 		suof_img_hdr[i].ae_mask &= handle->cfg_ae_mask;
1315 		if ((suof_img_hdr[i].ae_mask & 0x1) != 0)
1316 			ae0_img = i;
1317 	}
1318 	qat_uclo_tail_img(suof_img_hdr,
1319 			  ae0_img,
1320 			  suof_handle->img_table.num_simgs);
1321 	return 0;
1322 }
1323 
1324 #define ADD_ADDR(high, low) ((((uint64_t)high) << 32) + (low))
1325 #define BITS_IN_DWORD 32
1326 
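/*
 * Point the FCU at the authentication descriptor, issue the AUTH command
 * and poll FCU_STATUS until verification succeeds, fails, or
 * FW_AUTH_MAX_RETRY polling rounds have elapsed.
 */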
1327 static int
1328 qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
1329 		 struct icp_qat_fw_auth_desc *desc)
1330 {
1331 	unsigned int fcu_sts, mem_cfg_err, retry = 0;
1332 	unsigned int fcu_ctl_csr, fcu_sts_csr;
1333 	unsigned int fcu_dram_hi_csr, fcu_dram_lo_csr;
1334 	u64 bus_addr;
1335 
1336 	bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low) -
1337 	    sizeof(struct icp_qat_auth_chunk);
1338 	if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) {
1339 		fcu_ctl_csr = FCU_CONTROL_C4XXX;
1340 		fcu_sts_csr = FCU_STATUS_C4XXX;
1341 		fcu_dram_hi_csr = FCU_DRAM_ADDR_HI_C4XXX;
1342 		fcu_dram_lo_csr = FCU_DRAM_ADDR_LO_C4XXX;
1343 	} else {
1344 		fcu_ctl_csr = FCU_CONTROL;
1345 		fcu_sts_csr = FCU_STATUS;
1346 		fcu_dram_hi_csr = FCU_DRAM_ADDR_HI;
1347 		fcu_dram_lo_csr = FCU_DRAM_ADDR_LO;
1348 	}
1349 	SET_FCU_CSR(handle, fcu_dram_hi_csr, (bus_addr >> BITS_IN_DWORD));
1350 	SET_FCU_CSR(handle, fcu_dram_lo_csr, bus_addr);
1351 	SET_FCU_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_AUTH);
1352 
1353 	do {
1354 		pause_ms("adfstop", FW_AUTH_WAIT_PERIOD);
1355 		fcu_sts = GET_FCU_CSR(handle, fcu_sts_csr);
1356 		if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL)
1357 			goto auth_fail;
1358 		if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1))
1359 			if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE)
1360 				return 0;
1361 	} while (retry++ < FW_AUTH_MAX_RETRY);
1362 auth_fail:
1363 	pr_err("QAT: authentication error (FCU_STATUS = 0x%x), retry = %d\n",
1364 	       fcu_sts & FCU_AUTH_STS_MASK,
1365 	       retry);
1366 	if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) {
1367 		mem_cfg_err =
1368 		    (GET_FCU_CSR(handle, FCU_STATUS1_C4XXX) & MEM_CFG_ERR_BIT);
1369 		if (mem_cfg_err)
1370 			pr_err("QAT: MEM_CFG_ERR\n");
1371 	}
1372 	return EINVAL;
1373 }
1374 
1375 static int
1376 qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle *handle,
1377 		    struct icp_firml_dram_desc *dram_desc,
1378 		    unsigned int size)
1379 {
1380 	int ret;
1381 
1382 	ret = bus_dma_mem_create(&dram_desc->dram_mem,
1383 				 handle->accel_dev->dma_tag,
1384 				 1,
1385 				 BUS_SPACE_MAXADDR,
1386 				 size,
1387 				 0);
1388 	if (ret != 0)
1389 		return ret;
1390 	dram_desc->dram_base_addr_v = dram_desc->dram_mem.dma_vaddr;
1391 	dram_desc->dram_bus_addr = dram_desc->dram_mem.dma_baddr;
1392 	dram_desc->dram_size = size;
1393 	return 0;
1394 }
1395 
1396 static void
1397 qat_uclo_simg_free(struct icp_qat_fw_loader_handle *handle,
1398 		   struct icp_firml_dram_desc *dram_desc)
1399 {
1400 	if (handle && dram_desc && dram_desc->dram_base_addr_v)
1401 		bus_dma_mem_free(&dram_desc->dram_mem);
1402 
1403 	if (dram_desc)
1404 		explicit_bzero(dram_desc, sizeof(*dram_desc));
1405 }
1406 
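/*
 * Build the DMA-able authentication layout for a signed image: an
 * icp_qat_auth_chunk followed by the CSS header, the FWSK public key
 * (modulus, padding, exponent), the signature and the image body.  The
 * returned descriptor holds the bus address of each piece.
 */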
1407 static int
1408 qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
1409 		     const char *image,
1410 		     unsigned int size,
1411 		     struct icp_firml_dram_desc *img_desc,
1412 		     struct icp_qat_fw_auth_desc **desc)
1413 {
1414 	const struct icp_qat_css_hdr *css_hdr =
1415 	    (const struct icp_qat_css_hdr *)image;
1416 	struct icp_qat_fw_auth_desc *auth_desc;
1417 	struct icp_qat_auth_chunk *auth_chunk;
1418 	u64 virt_addr, bus_addr, virt_base;
1419 	unsigned int length, simg_offset = sizeof(*auth_chunk);
1420 
1421 	if (size > (ICP_QAT_AE_IMG_OFFSET + ICP_QAT_CSS_MAX_IMAGE_LEN)) {
1422 		pr_err("QAT: error, input image size overflow %d\n", size);
1423 		return EINVAL;
1424 	}
1425 	length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ?
1426 	    ICP_QAT_CSS_AE_SIMG_LEN + simg_offset :
1427 	    size + ICP_QAT_CSS_FWSK_PAD_LEN + simg_offset;
1428 	if (qat_uclo_simg_alloc(handle, img_desc, length)) {
1429 		pr_err("QAT: error, failed to allocate contiguous DRAM\n");
1430 		return ENOMEM;
1431 	}
1432 
1433 	auth_chunk = img_desc->dram_base_addr_v;
1434 	auth_chunk->chunk_size = img_desc->dram_size;
1435 	auth_chunk->chunk_bus_addr = img_desc->dram_bus_addr;
1436 	virt_base = (uintptr_t)img_desc->dram_base_addr_v + simg_offset;
1437 	bus_addr = img_desc->dram_bus_addr + simg_offset;
1438 	auth_desc = img_desc->dram_base_addr_v;
1439 	auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
1440 	auth_desc->css_hdr_low = (unsigned int)bus_addr;
1441 	virt_addr = virt_base;
1442 
1443 	memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr));
1444 	/* pub key */
1445 	bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) +
1446 	    sizeof(*css_hdr);
1447 	virt_addr = virt_addr + sizeof(*css_hdr);
1448 
1449 	auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
1450 	auth_desc->fwsk_pub_low = (unsigned int)bus_addr;
1451 
1452 	memcpy((void *)(uintptr_t)virt_addr,
1453 	       (const void *)(image + sizeof(*css_hdr)),
1454 	       ICP_QAT_CSS_FWSK_MODULUS_LEN);
1455 	/* padding */
1456 	explicit_bzero((void *)(uintptr_t)(virt_addr +
1457 					   ICP_QAT_CSS_FWSK_MODULUS_LEN),
1458 		       ICP_QAT_CSS_FWSK_PAD_LEN);
1459 
1460 	/* exponent */
1461 	memcpy((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN +
1462 				   ICP_QAT_CSS_FWSK_PAD_LEN),
1463 	       (const void *)(image + sizeof(*css_hdr) +
1464 			      ICP_QAT_CSS_FWSK_MODULUS_LEN),
1465 	       sizeof(unsigned int));
1466 
1467 	/* signature */
1468 	bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high, auth_desc->fwsk_pub_low) +
1469 	    ICP_QAT_CSS_FWSK_PUB_LEN;
1470 	virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN;
1471 	auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
1472 	auth_desc->signature_low = (unsigned int)bus_addr;
1473 
1474 	memcpy((void *)(uintptr_t)virt_addr,
1475 	       (const void *)(image + sizeof(*css_hdr) +
1476 			      ICP_QAT_CSS_FWSK_MODULUS_LEN +
1477 			      ICP_QAT_CSS_FWSK_EXPONENT_LEN),
1478 	       ICP_QAT_CSS_SIGNATURE_LEN);
1479 
1480 	bus_addr =
1481 	    ADD_ADDR(auth_desc->signature_high, auth_desc->signature_low) +
1482 	    ICP_QAT_CSS_SIGNATURE_LEN;
1483 	virt_addr += ICP_QAT_CSS_SIGNATURE_LEN;
1484 
1485 	auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD);
1486 	auth_desc->img_low = (unsigned int)bus_addr;
1487 	auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET;
1488 	memcpy((void *)(uintptr_t)virt_addr,
1489 	       (const void *)(image + ICP_QAT_AE_IMG_OFFSET),
1490 	       auth_desc->img_len);
1491 	virt_addr = virt_base;
1492 	/* AE firmware */
1493 	if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type ==
1494 	    CSS_AE_FIRMWARE) {
1495 		auth_desc->img_ae_mode_data_high = auth_desc->img_high;
1496 		auth_desc->img_ae_mode_data_low = auth_desc->img_low;
1497 		bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high,
1498 				    auth_desc->img_ae_mode_data_low) +
1499 		    sizeof(struct icp_qat_simg_ae_mode);
1500 
1501 		auth_desc->img_ae_init_data_high =
1502 		    (unsigned int)(bus_addr >> BITS_IN_DWORD);
1503 		auth_desc->img_ae_init_data_low = (unsigned int)bus_addr;
1504 		bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
1505 		auth_desc->img_ae_insts_high =
1506 		    (unsigned int)(bus_addr >> BITS_IN_DWORD);
1507 		auth_desc->img_ae_insts_low = (unsigned int)bus_addr;
1508 		virt_addr += sizeof(struct icp_qat_css_hdr) +
1509 		    ICP_QAT_CSS_FWSK_PUB_LEN + ICP_QAT_CSS_SIGNATURE_LEN;
1510 		auth_desc->ae_mask =
1511 		    ((struct icp_qat_simg_ae_mode *)virt_addr)->ae_mask &
1512 		    handle->cfg_ae_mask;
1513 	} else {
1514 		auth_desc->img_ae_insts_high = auth_desc->img_high;
1515 		auth_desc->img_ae_insts_low = auth_desc->img_low;
1516 	}
1517 	*desc = auth_desc;
1518 	return 0;
1519 }
1520 
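/*
 * Ask the FCU to load the authenticated image into every AE named in the
 * descriptor's AE mask, polling FCU_STATUS until the AE shows up in the
 * loaded-AE field or the retry budget is exhausted.
 */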
1521 static int
1522 qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
1523 		 struct icp_qat_fw_auth_desc *desc)
1524 {
1525 	unsigned int i = 0;
1526 	unsigned int fcu_sts;
1527 	unsigned int fcu_sts_csr, fcu_ctl_csr;
1528 	unsigned int loaded_aes = FCU_LOADED_AE_POS;
1529 	unsigned long ae_mask = handle->hal_handle->ae_mask;
1530 
1531 	if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) {
1532 		fcu_ctl_csr = FCU_CONTROL_C4XXX;
1533 		fcu_sts_csr = FCU_STATUS_C4XXX;
1534 
1535 	} else {
1536 		fcu_ctl_csr = FCU_CONTROL;
1537 		fcu_sts_csr = FCU_STATUS;
1538 	}
1539 
1540 	for_each_set_bit(i, &ae_mask, handle->hal_handle->ae_max_num)
1541 	{
1542 		int retry = 0;
1543 
1544 		if (!((desc->ae_mask >> i) & 0x1))
1545 			continue;
1546 		if (qat_hal_check_ae_active(handle, i)) {
1547 			pr_err("QAT: AE %d is active\n", i);
1548 			return EINVAL;
1549 		}
1550 		SET_FCU_CSR(handle,
1551 			    fcu_ctl_csr,
1552 			    (FCU_CTRL_CMD_LOAD | (i << FCU_CTRL_AE_POS)));
1553 
1554 		do {
1555 			pause_ms("adfstop", FW_AUTH_WAIT_PERIOD);
1556 			fcu_sts = GET_FCU_CSR(handle, fcu_sts_csr);
1557 			if ((fcu_sts & FCU_AUTH_STS_MASK) ==
1558 			    FCU_STS_LOAD_DONE) {
1559 				loaded_aes = IS_QAT_GEN3(pci_get_device(
1560 						 GET_DEV(handle->accel_dev))) ?
1561 				    GET_FCU_CSR(handle, FCU_AE_LOADED_C4XXX) :
1562 				    (fcu_sts >> FCU_LOADED_AE_POS);
1563 				if (loaded_aes & (1 << i))
1564 					break;
1565 			}
1566 		} while (retry++ < FW_AUTH_MAX_RETRY);
1567 		if (retry > FW_AUTH_MAX_RETRY) {
1568 			pr_err("QAT: firmware load timed out, retry = %d\n", retry);
1569 			return EINVAL;
1570 		}
1571 	}
1572 	return 0;
1573 }
1574 
1575 static int
1576 qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle,
1577 		      const void *addr_ptr,
1578 		      int mem_size)
1579 {
1580 	struct icp_qat_suof_handle *suof_handle;
1581 
1582 	suof_handle = malloc(sizeof(*suof_handle), M_QAT, M_WAITOK | M_ZERO);
1583 	handle->sobj_handle = suof_handle;
1584 	if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) {
1585 		qat_uclo_del_suof(handle);
1586 		pr_err("QAT: map SUOF failed\n");
1587 		return EINVAL;
1588 	}
1589 	return 0;
1590 }
1591 
1592 int
1593 qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
1594 		   const void *addr_ptr,
1595 		   int mem_size)
1596 {
1597 	struct icp_qat_fw_auth_desc *desc = NULL;
1598 	struct icp_firml_dram_desc img_desc;
1599 	int status = 0;
1600 
1601 	if (handle->fw_auth) {
1602 		status = qat_uclo_map_auth_fw(
1603 		    handle, addr_ptr, mem_size, &img_desc, &desc);
1604 		if (!status)
1605 			status = qat_uclo_auth_fw(handle, desc);
1606 
1607 		qat_uclo_simg_free(handle, &img_desc);
1608 	} else {
1609 		if (pci_get_device(GET_DEV(handle->accel_dev)) ==
1610 		    ADF_C3XXX_PCI_DEVICE_ID) {
1611 			pr_err("QAT: C3XXX doesn't support unsigned MMP\n");
1612 			return EINVAL;
1613 		}
1614 		status = qat_uclo_wr_sram_by_words(handle,
1615 						   handle->hal_sram_offset,
1616 						   addr_ptr,
1617 						   mem_size);
1618 	}
1619 	return status;
1620 }
1621 
1622 static int
1623 qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
1624 		     const void *addr_ptr,
1625 		     int mem_size)
1626 {
1627 	struct icp_qat_uof_filehdr *filehdr;
1628 	struct icp_qat_uclo_objhandle *objhdl;
1629 
1630 	objhdl = malloc(sizeof(*objhdl), M_QAT, M_WAITOK | M_ZERO);
1631 	objhdl->obj_buf = malloc(mem_size, M_QAT, M_WAITOK);
1632 	bcopy(addr_ptr, objhdl->obj_buf, mem_size);
1633 	filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
1634 	if (qat_uclo_check_uof_format(filehdr))
1635 		goto out_objhdr_err;
1636 	objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf,
1637 					     filehdr,
1638 					     ICP_QAT_UOF_OBJS);
1639 	if (!objhdl->obj_hdr) {
1640 		pr_err("QAT: object file chunk is null\n");
1641 		goto out_objhdr_err;
1642 	}
1643 	handle->obj_handle = objhdl;
1644 	if (qat_uclo_parse_uof_obj(handle))
1645 		goto out_overlay_obj_err;
1646 	return 0;
1647 
1648 out_overlay_obj_err:
1649 	handle->obj_handle = NULL;
1650 	free(objhdl->obj_hdr, M_QAT);
1651 out_objhdr_err:
1652 	free(objhdl->obj_buf, M_QAT);
1653 	free(objhdl, M_QAT);
1654 	return ENOMEM;
1655 }
1656 
1657 static int
1658 qat_uclo_map_mof_file_hdr(struct icp_qat_fw_loader_handle *handle,
1659 			  const struct icp_qat_mof_file_hdr *mof_ptr,
1660 			  u32 mof_size)
1661 {
1662 	unsigned int checksum = 0;
1663 	unsigned int min_ver_offset = 0;
1664 	struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle;
1665 
1666 	mobj_handle->file_id = ICP_QAT_MOF_FID;
1667 	mobj_handle->mof_buf = (const char *)mof_ptr;
1668 	mobj_handle->mof_size = mof_size;
1669 
1670 	min_ver_offset =
1671 	    mof_size - offsetof(struct icp_qat_mof_file_hdr, min_ver);
1672 	checksum = qat_uclo_calc_str_checksum((const char *)&mof_ptr->min_ver,
1673 					      min_ver_offset);
1674 	if (checksum != mof_ptr->checksum) {
1675 		pr_err("QAT: incorrect MOF checksum\n");
1676 		return EINVAL;
1677 	}
1678 	mobj_handle->checksum = mof_ptr->checksum;
1679 	mobj_handle->min_ver = mof_ptr->min_ver;
1680 	mobj_handle->maj_ver = mof_ptr->maj_ver;
1681 	return 0;
1682 }
1683 
1684 void
1685 qat_uclo_del_mof(struct icp_qat_fw_loader_handle *handle)
1686 {
1687 	struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle;
1688 
1689 	free(mobj_handle->obj_table.obj_hdr, M_QAT);
1690 	mobj_handle->obj_table.obj_hdr = NULL;
1691 	free(handle->mobj_handle, M_QAT);
1692 	handle->mobj_handle = NULL;
1693 }
1694 
1695 static int
1696 qat_uclo_seek_obj_inside_mof(struct icp_qat_mof_handle *mobj_handle,
1697 			     const char *obj_name,
1698 			     const char **obj_ptr,
1699 			     unsigned int *obj_size)
1700 {
1701 	unsigned int i;
1702 	struct icp_qat_mof_objhdr *obj_hdr = mobj_handle->obj_table.obj_hdr;
1703 
1704 	for (i = 0; i < mobj_handle->obj_table.num_objs; i++) {
1705 		if (!strncmp(obj_hdr[i].obj_name,
1706 			     obj_name,
1707 			     ICP_QAT_SUOF_OBJ_NAME_LEN)) {
1708 			*obj_ptr = obj_hdr[i].obj_buf;
1709 			*obj_size = obj_hdr[i].obj_size;
1710 			break;
1711 		}
1712 	}
1713 
1714 	if (i >= mobj_handle->obj_table.num_objs) {
1715 		pr_err("QAT: object %s is not found inside MOF\n", obj_name);
1716 		return EFAULT;
1717 	}
1718 	return 0;
1719 }
1720 
1721 static int
1722 qat_uclo_map_obj_from_mof(struct icp_qat_mof_handle *mobj_handle,
1723 			  struct icp_qat_mof_objhdr *mobj_hdr,
1724 			  struct icp_qat_mof_obj_chunkhdr *obj_chunkhdr)
1725 {
1726 	if ((strncmp((char *)obj_chunkhdr->chunk_id,
1727 		     ICP_QAT_UOF_IMAG,
1728 		     ICP_QAT_MOF_OBJ_CHUNKID_LEN)) == 0) {
1729 		mobj_hdr->obj_buf =
1730 		    (const char *)((unsigned long)obj_chunkhdr->offset +
1731 				   mobj_handle->uobjs_hdr);
1732 	} else if ((strncmp((char *)(obj_chunkhdr->chunk_id),
1733 			    ICP_QAT_SUOF_IMAG,
1734 			    ICP_QAT_MOF_OBJ_CHUNKID_LEN)) == 0) {
1735 		mobj_hdr->obj_buf =
1736 		    (const char *)((unsigned long)obj_chunkhdr->offset +
1737 				   mobj_handle->sobjs_hdr);
1739 	} else {
1740 		pr_err("QAT: unsupported chunk id\n");
1741 		return EINVAL;
1742 	}
1743 	mobj_hdr->obj_size = (unsigned int)obj_chunkhdr->size;
1744 	mobj_hdr->obj_name =
1745 	    (char *)(obj_chunkhdr->name + mobj_handle->sym_str);
1746 	return 0;
1747 }
1748 
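/*
 * Walk the UOF and SUOF chunk lists and build the flat object table
 * consumed by qat_uclo_seek_obj_inside_mof().
 */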
1749 static int
1750 qat_uclo_map_objs_from_mof(struct icp_qat_mof_handle *mobj_handle)
1751 {
1752 	struct icp_qat_mof_objhdr *mof_obj_hdr;
1753 	const struct icp_qat_mof_obj_hdr *uobj_hdr;
1754 	const struct icp_qat_mof_obj_hdr *sobj_hdr;
1755 	struct icp_qat_mof_obj_chunkhdr *uobj_chunkhdr;
1756 	struct icp_qat_mof_obj_chunkhdr *sobj_chunkhdr;
1757 	unsigned int uobj_chunk_num = 0, sobj_chunk_num = 0;
1758 	unsigned int *valid_chunks = NULL;
1759 	int ret, i;
1760 
1761 	uobj_hdr = (const struct icp_qat_mof_obj_hdr *)mobj_handle->uobjs_hdr;
1762 	sobj_hdr = (const struct icp_qat_mof_obj_hdr *)mobj_handle->sobjs_hdr;
1763 	if (uobj_hdr)
1764 		uobj_chunk_num = uobj_hdr->num_chunks;
1765 	if (sobj_hdr)
1766 		sobj_chunk_num = sobj_hdr->num_chunks;
1767 
1768 	mof_obj_hdr = (struct icp_qat_mof_objhdr *)
1769 	    malloc((uobj_chunk_num + sobj_chunk_num) * sizeof(*mof_obj_hdr),
1770 		   M_QAT,
1771 		   M_WAITOK | M_ZERO);
1772 
1773 	mobj_handle->obj_table.obj_hdr = mof_obj_hdr;
1774 	valid_chunks = &mobj_handle->obj_table.num_objs;
1775 	uobj_chunkhdr =
1776 	    (struct icp_qat_mof_obj_chunkhdr *)((uintptr_t)uobj_hdr +
1777 						sizeof(*uobj_hdr));
1778 	sobj_chunkhdr =
1779 	    (struct icp_qat_mof_obj_chunkhdr *)((uintptr_t)sobj_hdr +
1780 						sizeof(*sobj_hdr));
1781 
1782 	/* map uof objects */
1783 	for (i = 0; i < uobj_chunk_num; i++) {
1784 		ret = qat_uclo_map_obj_from_mof(mobj_handle,
1785 						&mof_obj_hdr[*valid_chunks],
1786 						&uobj_chunkhdr[i]);
1787 		if (ret)
1788 			return ret;
1789 		(*valid_chunks)++;
1790 	}
1791 
1792 	/* map suof objects */
1793 	for (i = 0; i < sobj_chunk_num; i++) {
1794 		ret = qat_uclo_map_obj_from_mof(mobj_handle,
1795 						&mof_obj_hdr[*valid_chunks],
1796 						&sobj_chunkhdr[i]);
1797 		if (ret)
1798 			return ret;
1799 		(*valid_chunks)++;
1800 	}
1801 
1802 	if ((uobj_chunk_num + sobj_chunk_num) != *valid_chunks) {
1803 		pr_err("QAT: inconsistent UOF/SUOF chunk count\n");
1804 		return EINVAL;
1805 	}
1806 	return 0;
1807 }
1808 
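/*
 * Record the location and size of the MOF symbol string table; the
 * strings start immediately after the 32-bit table length stored at the
 * chunk offset.
 */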
1809 static void
1810 qat_uclo_map_mof_symobjs(struct icp_qat_mof_handle *mobj_handle,
1811 			 struct icp_qat_mof_chunkhdr *mof_chunkhdr)
1812 {
1813 	char **sym_str = (char **)&mobj_handle->sym_str;
1814 	unsigned int *sym_size = &mobj_handle->sym_size;
1815 	struct icp_qat_mof_str_table *str_table_obj;
1816 
1817 	*sym_size = *(unsigned int *)(uintptr_t)(mof_chunkhdr->offset +
1818 						 mobj_handle->mof_buf);
1819 	*sym_str =
1820 	    (char *)(uintptr_t)(mobj_handle->mof_buf + mof_chunkhdr->offset +
1821 				sizeof(str_table_obj->tab_len));
1822 }
1823 
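/*
 * Dispatch one top-level MOF chunk by id: symbol table, UOF objects
 * header, or SUOF objects header. Unknown chunk ids are ignored here.
 */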
1824 static void
1825 qat_uclo_map_mof_chunk(struct icp_qat_mof_handle *mobj_handle,
1826 		       struct icp_qat_mof_chunkhdr *mof_chunkhdr)
1827 {
1828 	if (!strncmp(mof_chunkhdr->chunk_id,
1829 		     ICP_QAT_MOF_SYM_OBJS,
1830 		     ICP_QAT_MOF_OBJ_ID_LEN))
1831 		qat_uclo_map_mof_symobjs(mobj_handle, mof_chunkhdr);
1832 	else if (!strncmp(mof_chunkhdr->chunk_id,
1833 			  ICP_QAT_UOF_OBJS,
1834 			  ICP_QAT_MOF_OBJ_ID_LEN))
1835 		mobj_handle->uobjs_hdr =
1836 		    mobj_handle->mof_buf + (unsigned long)mof_chunkhdr->offset;
1837 	else if (!strncmp(mof_chunkhdr->chunk_id,
1838 			  ICP_QAT_SUOF_OBJS,
1839 			  ICP_QAT_MOF_OBJ_ID_LEN))
1840 		mobj_handle->sobjs_hdr =
1841 		    mobj_handle->mof_buf + (unsigned long)mof_chunkhdr->offset;
1842 }
1843 
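/* Validate the MOF file id, chunk count, and major/minor version. */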
1844 static int
1845 qat_uclo_check_mof_format(const struct icp_qat_mof_file_hdr *mof_hdr)
1846 {
1847 	int maj = mof_hdr->maj_ver & 0xff;
1848 	int min = mof_hdr->min_ver & 0xff;
1849 
1850 	if (mof_hdr->file_id != ICP_QAT_MOF_FID) {
1851 		pr_err("QAT: invalid header 0x%x\n", mof_hdr->file_id);
1852 		return EINVAL;
1853 	}
1854 
1855 	if (mof_hdr->num_chunks <= 0x1) {
1856 		pr_err("QAT: MOF chunk count is incorrect\n");
1857 		return EINVAL;
1858 	}
1859 	if (maj != ICP_QAT_MOF_MAJVER || min != ICP_QAT_MOF_MINVER) {
1860 		pr_err("QAT: bad MOF version, major 0x%x, minor 0x%x\n",
1861 		       maj,
1862 		       min);
1863 		return EINVAL;
1864 	}
1865 	return 0;
1866 }
1867 
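/*
 * Map an object out of a MOF image. A plain UOF or SUOF file is passed
 * through unchanged; otherwise the MOF header is validated, its chunks
 * are parsed, and the object named by obj_name is looked up.
 */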
1868 static int
1869 qat_uclo_map_mof_obj(struct icp_qat_fw_loader_handle *handle,
1870 		     const struct icp_qat_mof_file_hdr *mof_ptr,
1871 		     u32 mof_size,
1872 		     const char *obj_name,
1873 		     const char **obj_ptr,
1874 		     unsigned int *obj_size)
1875 {
1876 	struct icp_qat_mof_handle *mobj_handle;
1877 	struct icp_qat_mof_chunkhdr *mof_chunkhdr;
1878 	unsigned short chunks_num;
1879 	int ret;
1880 	unsigned int i;
1881 
1882 	if (mof_ptr->file_id == ICP_QAT_UOF_FID ||
1883 	    mof_ptr->file_id == ICP_QAT_SUOF_FID) {
1884 		if (obj_ptr)
1885 			*obj_ptr = (const char *)mof_ptr;
1886 		if (obj_size)
1887 			*obj_size = (unsigned int)mof_size;
1888 		return 0;
1889 	}
1890 	if (qat_uclo_check_mof_format(mof_ptr))
1891 		return EINVAL;
1892 	mobj_handle = malloc(sizeof(*mobj_handle), M_QAT, M_WAITOK | M_ZERO);
1893 	handle->mobj_handle = mobj_handle;
1894 	ret = qat_uclo_map_mof_file_hdr(handle, mof_ptr, mof_size);
1895 	if (ret)
1896 		return ret;
1897 	mof_chunkhdr = (struct icp_qat_mof_chunkhdr *)((uintptr_t)mof_ptr +
1898 						       sizeof(*mof_ptr));
1899 	chunks_num = mof_ptr->num_chunks;
1900 	/* Parse the MOF file chunks */
1901 	for (i = 0; i < chunks_num; i++)
1902 		qat_uclo_map_mof_chunk(mobj_handle, &mof_chunkhdr[i]);
1903 	/* The symbol table and at least one of uobjs/sobjs must be present */
1904 	if (!mobj_handle->sym_str ||
1905 	    (!mobj_handle->uobjs_hdr && !mobj_handle->sobjs_hdr))
1906 		return EINVAL;
1907 	ret = qat_uclo_map_objs_from_mof(mobj_handle);
1908 	if (ret)
1909 		return ret;
1910 	/* Seek the specified object inside the MOF */
1911 	ret = qat_uclo_seek_obj_inside_mof(mobj_handle,
1912 					   obj_name,
1913 					   obj_ptr,
1914 					   obj_size);
1915 	if (ret)
1916 		return ret;
1917 	return 0;
1918 }
1919 
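/*
 * Public entry point for mapping a firmware object. After basic sanity
 * checks, an obj_name selects extraction from a MOF container; the
 * result is then mapped as a SUOF (authenticated firmware) or plain UOF
 * image depending on handle->fw_auth.
 *
 * A typical caller sequence (sketch only; error handling omitted):
 *
 *	if (!qat_uclo_map_obj(handle, fw_addr, fw_size, obj_name))
 *		qat_uclo_wr_all_uimage(handle);
 *	...
 *	qat_uclo_del_obj(handle);
 */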
1920 int
1921 qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
1922 		 const void *addr_ptr,
1923 		 u32 mem_size,
1924 		 const char *obj_name)
1925 {
1926 	const char *obj_addr;
1927 	u32 obj_size;
1928 	int ret;
1929 
1930 	BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >
1931 		     (sizeof(handle->hal_handle->ae_mask) * 8));
1932 
1933 	if (!handle || !addr_ptr || mem_size < 24)
1934 		return EINVAL;
1935 
1936 	if (obj_name) {
1937 		ret = qat_uclo_map_mof_obj(
1938 		    handle, addr_ptr, mem_size, obj_name, &obj_addr, &obj_size);
1939 		if (ret)
1940 			return ret;
1941 	} else {
1942 		obj_addr = addr_ptr;
1943 		obj_size = mem_size;
1944 	}
1945 
1946 	return (handle->fw_auth) ?
1947 	    qat_uclo_map_suof_obj(handle, obj_addr, obj_size) :
1948 	    qat_uclo_map_uof_obj(handle, obj_addr, obj_size);
1949 }
1950 
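/*
 * Tear down everything qat_uclo_map_obj() created: the MOF and SUOF
 * handles if present, the uword copy buffer, the per-image pages, the
 * per-AE slice data, and finally the UOF object handle with its buffers.
 */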
1951 void
1952 qat_uclo_del_obj(struct icp_qat_fw_loader_handle *handle)
1953 {
1954 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
1955 	unsigned int a;
1956 	unsigned long ae_mask = handle->hal_handle->ae_mask;
1957 
1958 	if (handle->mobj_handle)
1959 		qat_uclo_del_mof(handle);
1960 	if (handle->sobj_handle)
1961 		qat_uclo_del_suof(handle);
1962 	if (!obj_handle)
1963 		return;
1964 
1965 	free(obj_handle->uword_buf, M_QAT);
1966 	for (a = 0; a < obj_handle->uimage_num; a++)
1967 		free(obj_handle->ae_uimage[a].page, M_QAT);
1968 
1969 	for_each_set_bit(a, &ae_mask, handle->hal_handle->ae_max_num)
1970 	{
1971 		qat_uclo_free_ae_data(&obj_handle->ae_data[a]);
1972 	}
1973 
1974 	free(obj_handle->obj_hdr, M_QAT);
1975 	free(obj_handle->obj_buf, M_QAT);
1976 	free(obj_handle, M_QAT);
1977 	handle->obj_handle = NULL;
1978 }
1979 
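/*
 * Resolve one microword for a ustore address. With no encapsulated page
 * the fill pattern is used directly. Otherwise the word is looked up in
 * the page's uword blocks, masked to the valid bit range, and replaced
 * by the fill pattern if it decodes to INVLD_UWORD.
 */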
1980 static void
1981 qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
1982 		     struct icp_qat_uclo_encap_page *encap_page,
1983 		     uint64_t *uword,
1984 		     unsigned int addr_p,
1985 		     unsigned int raddr,
1986 		     uint64_t fill)
1987 {
1988 	uint64_t uwrd = 0;
1989 	unsigned int i, addr;
1990 
1991 	if (!encap_page) {
1992 		*uword = fill;
1993 		return;
1994 	}
1995 	addr = (encap_page->page_region) ? raddr : addr_p;
1996 	for (i = 0; i < encap_page->uwblock_num; i++) {
1997 		if (addr >= encap_page->uwblock[i].start_addr &&
1998 		    addr <= encap_page->uwblock[i].start_addr +
1999 			    encap_page->uwblock[i].words_num - 1) {
2000 			addr -= encap_page->uwblock[i].start_addr;
2001 			addr *= obj_handle->uword_in_bytes;
2002 			memcpy(&uwrd,
2003 			       (void *)(((uintptr_t)encap_page->uwblock[i]
2004 					     .micro_words) +
2005 					addr),
2006 			       obj_handle->uword_in_bytes);
2007 			uwrd = uwrd & 0xbffffffffffull;
2008 		}
2009 	}
2010 	*uword = uwrd;
2011 	if (*uword == INVLD_UWORD)
2012 		*uword = fill;
2013 }
2014 
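/*
 * Write one encapsulated page into an AE's ustore in batches of at most
 * UWORD_CPYBUF_SIZE words, expanding each word via qat_uclo_fill_uwords()
 * with the image fill pattern. Shareable ustore takes the coalescing
 * write path; otherwise a plain uword write is used.
 */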
2015 static void
2016 qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
2017 			    struct icp_qat_uclo_encap_page *encap_page,
2018 			    unsigned int ae)
2019 {
2020 	unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
2021 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
2022 	uint64_t fill_pat;
2023 
2024 	/* load the page starting at appropriate ustore address */
2025 	/* get fill-pattern from an image -- they are all the same */
2026 	memcpy(&fill_pat,
2027 	       obj_handle->ae_uimage[0].img_ptr->fill_pattern,
2028 	       sizeof(uint64_t));
2029 	uw_physical_addr = encap_page->beg_addr_p;
2030 	uw_relative_addr = 0;
2031 	words_num = encap_page->micro_words_num;
2032 	while (words_num) {
2033 		if (words_num < UWORD_CPYBUF_SIZE)
2034 			cpylen = words_num;
2035 		else
2036 			cpylen = UWORD_CPYBUF_SIZE;
2037 
2038 		/* load the buffer */
2039 		for (i = 0; i < cpylen; i++)
2040 			qat_uclo_fill_uwords(obj_handle,
2041 					     encap_page,
2042 					     &obj_handle->uword_buf[i],
2043 					     uw_physical_addr + i,
2044 					     uw_relative_addr + i,
2045 					     fill_pat);
2046 
2047 		if (obj_handle->ae_data[ae].shareable_ustore)
2048 			/* copy the buffer to ustore */
2049 			qat_hal_wr_coalesce_uwords(handle,
2050 						   (unsigned char)ae,
2051 						   uw_physical_addr,
2052 						   cpylen,
2053 						   obj_handle->uword_buf);
2054 		else
2055 			/* copy the buffer to ustore */
2056 			qat_hal_wr_uwords(handle,
2057 					  (unsigned char)ae,
2058 					  uw_physical_addr,
2059 					  cpylen,
2060 					  obj_handle->uword_buf);
2061 		uw_physical_addr += cpylen;
2062 		uw_relative_addr += cpylen;
2063 		words_num -= cpylen;
2064 	}
2065 }
2066 
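/*
 * Load an image's default page on every AE that is both configured and
 * assigned to the image, point the current-page table at it for the
 * contexts the image owns, and set those contexts live with their PC at
 * the image entry address.
 */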
2067 static void
2068 qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
2069 			struct icp_qat_uof_image *image)
2070 {
2071 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
2072 	unsigned int ctx_mask, s;
2073 	struct icp_qat_uclo_page *page;
2074 	unsigned char ae = 0;
2075 	int ctx;
2076 	struct icp_qat_uclo_aedata *aed;
2077 	unsigned long ae_mask = handle->hal_handle->ae_mask;
2078 
2079 	if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
2080 		ctx_mask = 0xff;
2081 	else
2082 		ctx_mask = 0x55;
2083 	/* load the default page and set assigned CTX PC
2084 	 * to the entrypoint address
2085 	 */
2086 	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
2087 	{
2088 		unsigned long cfg_ae_mask = handle->cfg_ae_mask;
2089 		unsigned long ae_assigned = image->ae_assigned;
2090 
2091 		if (!test_bit(ae, &cfg_ae_mask))
2092 			continue;
2093 
2094 		if (!test_bit(ae, &ae_assigned))
2095 			continue;
2096 
2097 		aed = &obj_handle->ae_data[ae];
2098 		/* find the slice to which this image is assigned */
2099 		for (s = 0; s < aed->slice_num; s++) {
2100 			if (image->ctx_assigned &
2101 			    aed->ae_slices[s].ctx_mask_assigned)
2102 				break;
2103 		}
2104 		if (s >= aed->slice_num)
2105 			continue;
2106 		page = aed->ae_slices[s].page;
2107 		if (!page->encap_page->def_page)
2108 			continue;
2109 		qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae);
2110 
2111 		page = aed->ae_slices[s].page;
2112 		for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
2113 			aed->ae_slices[s].cur_page[ctx] =
2114 			    (ctx_mask & (1 << ctx)) ? page : NULL;
2115 		qat_hal_set_live_ctx(handle,
2116 				     (unsigned char)ae,
2117 				     image->ctx_assigned);
2118 		qat_hal_set_pc(handle,
2119 			       (unsigned char)ae,
2120 			       image->ctx_assigned,
2121 			       image->entry_address);
2122 	}
2123 }
2124 
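/*
 * Authenticate and load every image in the SUOF table: each image is
 * mapped into a DRAM descriptor, authenticated by the firmware, loaded,
 * and its descriptor freed again. On failure the descriptor is freed
 * and EINVAL is returned.
 */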
2125 static int
2126 qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle)
2127 {
2128 	unsigned int i;
2129 	struct icp_qat_fw_auth_desc *desc = NULL;
2130 	struct icp_firml_dram_desc img_desc;
2131 	struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
2132 	struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr;
2133 
2134 	for (i = 0; i < sobj_handle->img_table.num_simgs; i++) {
2135 		if (qat_uclo_map_auth_fw(handle,
2136 					 (const char *)simg_hdr[i].simg_buf,
2137 					 (unsigned int)(simg_hdr[i].simg_len),
2138 					 &img_desc,
2139 					 &desc))
2140 			goto wr_err;
2141 		if (qat_uclo_auth_fw(handle, desc))
2142 			goto wr_err;
2143 		if (qat_uclo_load_fw(handle, desc))
2144 			goto wr_err;
2145 		qat_uclo_simg_free(handle, &img_desc);
2146 	}
2147 	return 0;
2148 wr_err:
2149 	qat_uclo_simg_free(handle, &img_desc);
2150 	return EINVAL;
2151 }
2152 
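/*
 * Initialize global memory, then program each mapped image's ustore
 * contents and page state onto its assigned AEs.
 */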
2153 static int
2154 qat_uclo_wr_uof_img(struct icp_qat_fw_loader_handle *handle)
2155 {
2156 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
2157 	unsigned int i;
2158 
2159 	if (qat_uclo_init_globals(handle))
2160 		return EINVAL;
2161 	for (i = 0; i < obj_handle->uimage_num; i++) {
2162 		if (!obj_handle->ae_uimage[i].img_ptr)
2163 			return EINVAL;
2164 		if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
2165 			return EINVAL;
2166 		qat_uclo_wr_uimage_page(handle,
2167 					obj_handle->ae_uimage[i].img_ptr);
2168 	}
2169 	return 0;
2170 }
2171 
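/* Write all images via the SUOF or UOF path, depending on fw_auth. */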
2172 int
2173 qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
2174 {
2175 	return (handle->fw_auth) ? qat_uclo_wr_suof_img(handle) :
2176 				   qat_uclo_wr_uof_img(handle);
2177 }
2178 
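/* Record the configured AE mask; an empty mask is invalid. */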
2179 int
2180 qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle,
2181 			 unsigned int cfg_ae_mask)
2182 {
2183 	if (!cfg_ae_mask)
2184 		return EINVAL;
2185 
2186 	handle->cfg_ae_mask = cfg_ae_mask;
2187 	return 0;
2188 }
2189