// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */

#define pr_fmt(fmt) "QAT: " fmt

#include <linux/align.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/pci_ids.h>
#include <linux/wordpart.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "icp_qat_uclo.h"
#include "icp_qat_hal.h"
#include "icp_qat_fw_loader_handle.h"

#define UWORD_CPYBUF_SIZE 1024U
#define INVLD_UWORD 0xffffffffffull
#define PID_MINOR_REV 0xf
#define PID_MAJOR_REV (0xf << 4)

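/*
 * Bind the image at @image_num to the next free slice of AE @ae and
 * allocate the region and page bookkeeping for that slice.
 */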
static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
				 unsigned int ae, unsigned int image_num)
{
	struct icp_qat_uclo_aedata *ae_data;
	struct icp_qat_uclo_encapme *encap_image;
	struct icp_qat_uclo_page *page = NULL;
	struct icp_qat_uclo_aeslice *ae_slice = NULL;

	ae_data = &obj_handle->ae_data[ae];
	encap_image = &obj_handle->ae_uimage[image_num];
	ae_slice = &ae_data->ae_slices[ae_data->slice_num];
	ae_slice->encap_image = encap_image;

	if (encap_image->img_ptr) {
		ae_slice->ctx_mask_assigned =
					encap_image->img_ptr->ctx_assigned;
		ae_data->eff_ustore_size = obj_handle->ustore_phy_size;
	} else {
		ae_slice->ctx_mask_assigned = 0;
	}
	ae_slice->region = kzalloc(sizeof(*ae_slice->region), GFP_KERNEL);
	if (!ae_slice->region)
		return -ENOMEM;
	ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL);
	if (!ae_slice->page)
		goto out_err;
	page = ae_slice->page;
	page->encap_page = encap_image->page;
	ae_slice->page->region = ae_slice->region;
	ae_data->slice_num++;
	return 0;
out_err:
	kfree(ae_slice->region);
	ae_slice->region = NULL;
	return -ENOMEM;
}

static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
{
	unsigned int i;

	if (!ae_data) {
		pr_err("bad argument, ae_data is NULL\n");
		return -EINVAL;
	}

	for (i = 0; i < ae_data->slice_num; i++) {
		kfree(ae_data->ae_slices[i].region);
		ae_data->ae_slices[i].region = NULL;
		kfree(ae_data->ae_slices[i].page);
		ae_data->ae_slices[i].page = NULL;
	}
	return 0;
}

static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
				 unsigned int str_offset)
{
	if (!str_table->table_len || str_offset > str_table->table_len)
		return NULL;
	return (char *)(((uintptr_t)(str_table->strings)) + str_offset);
}

static int qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr)
{
	int maj = hdr->maj_ver & 0xff;
	int min = hdr->min_ver & 0xff;

	if (hdr->file_id != ICP_QAT_UOF_FID) {
		pr_err("Invalid header 0x%x\n", hdr->file_id);
		return -EINVAL;
	}
	if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
		pr_err("bad UOF version, major 0x%x, minor 0x%x\n", maj, min);
		return -EINVAL;
	}
	return 0;
}

static int qat_uclo_check_suof_format(struct icp_qat_suof_filehdr *suof_hdr)
{
	int maj = suof_hdr->maj_ver & 0xff;
	int min = suof_hdr->min_ver & 0xff;

	if (suof_hdr->file_id != ICP_QAT_SUOF_FID) {
		pr_err("invalid header 0x%x\n", suof_hdr->file_id);
		return -EINVAL;
	}
	if (suof_hdr->fw_type != 0) {
		pr_err("unsupported firmware type\n");
		return -EINVAL;
	}
	if (suof_hdr->num_chunks <= 0x1) {
		pr_err("SUOF chunk amount is incorrect\n");
		return -EINVAL;
	}
	if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) {
		pr_err("bad SUOF version, major 0x%x, minor 0x%x\n", maj, min);
		return -EINVAL;
	}
	return 0;
}

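/* Write @num_in_bytes (a multiple of 4) to SRAM, one 32-bit word at a time. */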
static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
				      unsigned int addr, unsigned int *val,
				      unsigned int num_in_bytes)
{
	unsigned int outval;
	unsigned char *ptr = (unsigned char *)val;

	while (num_in_bytes) {
		memcpy(&outval, ptr, 4);
		SRAM_WRITE(handle, addr, outval);
		num_in_bytes -= 4;
		ptr += 4;
		addr += 4;
	}
}

static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
				      unsigned char ae, unsigned int addr,
				      unsigned int *val,
				      unsigned int num_in_bytes)
{
	unsigned int outval;
	unsigned char *ptr = (unsigned char *)val;

	addr >>= 0x2; /* convert to uword address */

	while (num_in_bytes) {
		memcpy(&outval, ptr, 4);
		qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
		num_in_bytes -= 4;
		ptr += 4;
	}
}

static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle,
				   unsigned char ae,
				   struct icp_qat_uof_batch_init
				   *umem_init_header)
{
	struct icp_qat_uof_batch_init *umem_init;

	if (!umem_init_header)
		return;
	umem_init = umem_init_header->next;
	while (umem_init) {
		unsigned int addr, *value, size;

		ae = umem_init->ae;
		addr = umem_init->addr;
		value = umem_init->value;
		size = umem_init->size;
		qat_uclo_wr_umem_by_words(handle, ae, addr, value, size);
		umem_init = umem_init->next;
	}
}

static void
qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
				 struct icp_qat_uof_batch_init **base)
{
	struct icp_qat_uof_batch_init *umem_init;

	umem_init = *base;
	while (umem_init) {
		struct icp_qat_uof_batch_init *pre;

		pre = umem_init;
		umem_init = umem_init->next;
		kfree(pre);
	}
	*base = NULL;
}

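/* Parse a decimal AE number from @str, rejecting values above UINT_MAX. */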
static int qat_uclo_parse_num(char *str, unsigned int *num)
{
	unsigned long long ae;
	char *end;

	ae = simple_strtoull(str, &end, 10);
	if (ae > UINT_MAX || str == end || (end - str) > 19)
		return -EINVAL;
	*num = (unsigned int)ae;
	return 0;
}

static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
				     struct icp_qat_uof_initmem *init_mem,
				     unsigned int size_range, unsigned int *ae)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	char *str;

	if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
		pr_err("initmem is out of range\n");
		return -EINVAL;
	}
	if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
		pr_err("Memory scope for init_mem error\n");
		return -EINVAL;
	}
	str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
	if (!str) {
		pr_err("AE name assigned in UOF init table is NULL\n");
		return -EINVAL;
	}
	if (qat_uclo_parse_num(str, ae)) {
		pr_err("Parse num for AE number failed\n");
		return -EINVAL;
	}
	if (*ae >= ICP_QAT_UCLO_MAX_AE) {
		pr_err("ae %d out of range\n", *ae);
		return -EINVAL;
	}
	return 0;
}

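/*
 * Append one batch-init node per memory value attribute of @init_mem to the
 * list at @init_tab_base, allocating the list head on first use. On
 * allocation failure, every node added here (and the head, if we allocated
 * it) is unwound.
 */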
static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
					   *handle, struct icp_qat_uof_initmem
					   *init_mem, unsigned int ae,
					   struct icp_qat_uof_batch_init
					   **init_tab_base)
{
	struct icp_qat_uof_batch_init *init_header, *tail;
	struct icp_qat_uof_batch_init *mem_init, *tail_old;
	struct icp_qat_uof_memvar_attr *mem_val_attr;
	unsigned int i, flag = 0;

	mem_val_attr =
		(struct icp_qat_uof_memvar_attr *)((uintptr_t)init_mem +
		sizeof(struct icp_qat_uof_initmem));

	init_header = *init_tab_base;
	if (!init_header) {
		init_header = kzalloc(sizeof(*init_header), GFP_KERNEL);
		if (!init_header)
			return -ENOMEM;
		init_header->size = 1;
		*init_tab_base = init_header;
		flag = 1;
	}
	tail_old = init_header;
	while (tail_old->next)
		tail_old = tail_old->next;
	tail = tail_old;
	for (i = 0; i < init_mem->val_attr_num; i++) {
		mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL);
		if (!mem_init)
			goto out_err;
		mem_init->ae = ae;
		mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte;
		mem_init->value = &mem_val_attr->value;
		mem_init->size = 4;
		mem_init->next = NULL;
		tail->next = mem_init;
		tail = mem_init;
		init_header->size += qat_hal_get_ins_num();
		mem_val_attr++;
	}
	return 0;
out_err:
	/* Do not free the list head unless we allocated it. */
	tail_old = tail_old->next;
	if (flag) {
		kfree(*init_tab_base);
		*init_tab_base = NULL;
	}

	while (tail_old) {
		mem_init = tail_old->next;
		kfree(tail_old);
		tail_old = mem_init;
	}
	return -ENOMEM;
}

static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
				  struct icp_qat_uof_initmem *init_mem)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ae;

	if (qat_uclo_fetch_initmem_ae(handle, init_mem,
				      handle->chip_info->lm_size, &ae))
		return -EINVAL;
	if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
					    &obj_handle->lm_init_tab[ae]))
		return -EINVAL;
	return 0;
}

static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
				  struct icp_qat_uof_initmem *init_mem)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ae, ustore_size, uaddr, i;
	struct icp_qat_uclo_aedata *aed;

	ustore_size = obj_handle->ustore_phy_size;
	if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
		return -EINVAL;
	if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
					    &obj_handle->umem_init_tab[ae]))
		return -EINVAL;
	/* set the highest ustore address referenced */
	uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
	aed = &obj_handle->ae_data[ae];
	for (i = 0; i < aed->slice_num; i++) {
		if (aed->ae_slices[i].encap_image->uwords_num < uaddr)
			aed->ae_slices[i].encap_image->uwords_num = uaddr;
	}
	return 0;
}

static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
				   struct icp_qat_uof_initmem *init_mem)
{
	switch (init_mem->region) {
	case ICP_QAT_UOF_LMEM_REGION:
		if (qat_uclo_init_lmem_seg(handle, init_mem))
			return -EINVAL;
		break;
	case ICP_QAT_UOF_UMEM_REGION:
		if (qat_uclo_init_umem_seg(handle, init_mem))
			return -EINVAL;
		break;
	default:
		pr_err("initmem region error. region type=0x%x\n", init_mem->region);
		return -EINVAL;
	}
	return 0;
}

static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
				struct icp_qat_uclo_encapme *image)
{
	unsigned int i;
	struct icp_qat_uclo_encap_page *page;
	struct icp_qat_uof_image *uof_image;
	unsigned char ae;
	unsigned int ustore_size;
	unsigned int patt_pos;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned long ae_mask = handle->hal_handle->ae_mask;
	unsigned long cfg_ae_mask = handle->cfg_ae_mask;
	u64 *fill_data;

	uof_image = image->img_ptr;
	fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(u64),
			    GFP_KERNEL);
	if (!fill_data)
		return -ENOMEM;
	for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
		memcpy(&fill_data[i], &uof_image->fill_pattern,
		       sizeof(u64));
	page = image->page;

	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
		unsigned long ae_assigned = uof_image->ae_assigned;

		if (!test_bit(ae, &ae_assigned))
			continue;

		if (!test_bit(ae, &cfg_ae_mask))
			continue;

		ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
		patt_pos = page->beg_addr_p + page->micro_words_num;

		qat_hal_wr_uwords(handle, (unsigned char)ae, 0,
				  page->beg_addr_p, &fill_data[0]);
		qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos,
				  ustore_size - patt_pos + 1,
				  &fill_data[page->beg_addr_p]);
	}
	kfree(fill_data);
	return 0;
}

static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
{
	int i, ae;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;
	unsigned long ae_mask = handle->hal_handle->ae_mask;

	for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
		if (initmem->num_in_bytes) {
			if (qat_uclo_init_ae_memory(handle, initmem))
				return -EINVAL;
		}
		initmem = (struct icp_qat_uof_initmem *)((uintptr_t)(
			(uintptr_t)initmem +
			sizeof(struct icp_qat_uof_initmem)) +
			(sizeof(struct icp_qat_uof_memvar_attr) *
			initmem->val_attr_num));
	}

	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
		if (qat_hal_batch_wr_lm(handle, ae,
					obj_handle->lm_init_tab[ae])) {
			pr_err("failed to batch init lmem for AE %d\n", ae);
			return -EINVAL;
		}
		qat_uclo_cleanup_batch_init_list(handle,
						 &obj_handle->lm_init_tab[ae]);
		qat_uclo_batch_wr_umem(handle, ae,
				       obj_handle->umem_init_tab[ae]);
		qat_uclo_cleanup_batch_init_list(handle,
						 &obj_handle->
						 umem_init_tab[ae]);
	}
	return 0;
}

static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
				 char *chunk_id, void *cur)
{
	int i;
	struct icp_qat_uof_chunkhdr *chunk_hdr =
		(struct icp_qat_uof_chunkhdr *)
		((uintptr_t)obj_hdr + sizeof(struct icp_qat_uof_objhdr));

	for (i = 0; i < obj_hdr->num_chunks; i++) {
		if ((cur < (void *)&chunk_hdr[i]) &&
		    !strncmp(chunk_hdr[i].chunk_id, chunk_id,
			     ICP_QAT_UOF_OBJID_LEN)) {
			return &chunk_hdr[i];
		}
	}
	return NULL;
}

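/* Bitwise CRC-16 with polynomial 0x1021 (CCITT), folding in one byte per call. */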
static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch)
{
	int i;
	unsigned int topbit = 1 << 0xF;
	unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch);

	reg ^= inbyte << 0x8;
	for (i = 0; i < 0x8; i++) {
		if (reg & topbit)
			reg = (reg << 1) ^ 0x1021;
		else
			reg <<= 1;
	}
	return reg & 0xFFFF;
}

static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num)
{
	unsigned int chksum = 0;

	if (ptr)
		while (num--)
			chksum = qat_uclo_calc_checksum(chksum, *ptr++);
	return chksum;
}

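/*
 * Locate the file chunk named @chunk_id in @buf, verify its checksum and
 * wrap it in a freshly allocated object header. Returns NULL if the chunk
 * is missing, corrupt, or memory allocation fails.
 */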
static struct icp_qat_uclo_objhdr *
qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
		   char *chunk_id)
{
	struct icp_qat_uof_filechunkhdr *file_chunk;
	struct icp_qat_uclo_objhdr *obj_hdr;
	char *chunk;
	int i;

	file_chunk = (struct icp_qat_uof_filechunkhdr *)
		(buf + sizeof(struct icp_qat_uof_filehdr));
	for (i = 0; i < file_hdr->num_chunks; i++) {
		if (!strncmp(file_chunk->chunk_id, chunk_id,
			     ICP_QAT_UOF_OBJID_LEN)) {
			chunk = buf + file_chunk->offset;
			if (file_chunk->checksum != qat_uclo_calc_str_checksum(
				chunk, file_chunk->size))
				break;
			obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL);
			if (!obj_hdr)
				break;
			obj_hdr->file_buff = chunk;
			obj_hdr->checksum = file_chunk->checksum;
			obj_hdr->size = file_chunk->size;
			return obj_hdr;
		}
		file_chunk++;
	}
	return NULL;
}

static int
qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
			    struct icp_qat_uof_image *image)
{
	struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab;
	struct icp_qat_uof_objtable *neigh_reg_tab;
	struct icp_qat_uof_code_page *code_page;

	code_page = (struct icp_qat_uof_code_page *)
			((char *)image + sizeof(struct icp_qat_uof_image));
	uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
		     code_page->uc_var_tab_offset);
	imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
		      code_page->imp_var_tab_offset);
	imp_expr_tab = (struct icp_qat_uof_objtable *)
		       (encap_uof_obj->beg_uof +
		       code_page->imp_expr_tab_offset);
	if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
	    imp_expr_tab->entry_num) {
		pr_err("UOF can't contain imported variables\n");
		return -EINVAL;
	}
	neigh_reg_tab = (struct icp_qat_uof_objtable *)
			(encap_uof_obj->beg_uof +
			code_page->neigh_reg_tab_offset);
	if (neigh_reg_tab->entry_num) {
		pr_err("UOF can't contain neighbor register table\n");
		return -EINVAL;
	}
	if (image->numpages > 1) {
		pr_err("UOF can't contain multiple pages\n");
		return -EINVAL;
	}
	if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
		pr_err("UOF can't use shared control store feature\n");
		return -EFAULT;
	}
	if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
		pr_err("UOF can't use reloadable feature\n");
		return -EFAULT;
	}
	return 0;
}

static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj
				     *encap_uof_obj,
				     struct icp_qat_uof_image *img,
				     struct icp_qat_uclo_encap_page *page)
{
	struct icp_qat_uof_code_page *code_page;
	struct icp_qat_uof_code_area *code_area;
	struct icp_qat_uof_objtable *uword_block_tab;
	struct icp_qat_uof_uword_block *uwblock;
	int i;

	code_page = (struct icp_qat_uof_code_page *)
			((char *)img + sizeof(struct icp_qat_uof_image));
	page->def_page = code_page->def_page;
	page->page_region = code_page->page_region;
	page->beg_addr_v = code_page->beg_addr_v;
	page->beg_addr_p = code_page->beg_addr_p;
	code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof +
						code_page->code_area_offset);
	page->micro_words_num = code_area->micro_words_num;
	uword_block_tab = (struct icp_qat_uof_objtable *)
			  (encap_uof_obj->beg_uof +
			  code_area->uword_block_tab);
	page->uwblock_num = uword_block_tab->entry_num;
	uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab +
			sizeof(struct icp_qat_uof_objtable));
	page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
	for (i = 0; i < uword_block_tab->entry_num; i++)
		page->uwblock[i].micro_words =
		(uintptr_t)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
}

static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
			       struct icp_qat_uclo_encapme *ae_uimage,
			       int max_image)
{
	int i, j;
	struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
	struct icp_qat_uof_image *image;
	struct icp_qat_uof_objtable *ae_regtab;
	struct icp_qat_uof_objtable *init_reg_sym_tab;
	struct icp_qat_uof_objtable *sbreak_tab;
	struct icp_qat_uof_encap_obj *encap_uof_obj =
		&obj_handle->encap_uof_obj;

	for (j = 0; j < max_image; j++) {
		chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
						ICP_QAT_UOF_IMAG, chunk_hdr);
		if (!chunk_hdr)
			break;
		image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof +
						     chunk_hdr->offset);
		ae_regtab = (struct icp_qat_uof_objtable *)
			   (image->reg_tab_offset +
			   obj_handle->obj_hdr->file_buff);
		ae_uimage[j].ae_reg_num = ae_regtab->entry_num;
		ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *)
			(((char *)ae_regtab) +
			sizeof(struct icp_qat_uof_objtable));
		init_reg_sym_tab = (struct icp_qat_uof_objtable *)
				   (image->init_reg_sym_tab +
				   obj_handle->obj_hdr->file_buff);
		ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num;
		ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *)
			(((char *)init_reg_sym_tab) +
			sizeof(struct icp_qat_uof_objtable));
		sbreak_tab = (struct icp_qat_uof_objtable *)
			(image->sbreak_tab + obj_handle->obj_hdr->file_buff);
		ae_uimage[j].sbreak_num = sbreak_tab->entry_num;
		ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *)
				      (((char *)sbreak_tab) +
				      sizeof(struct icp_qat_uof_objtable));
		ae_uimage[j].img_ptr = image;
		if (qat_uclo_check_image_compat(encap_uof_obj, image))
			goto out_err;
		ae_uimage[j].page =
			kzalloc(sizeof(struct icp_qat_uclo_encap_page),
				GFP_KERNEL);
		if (!ae_uimage[j].page)
			goto out_err;
		qat_uclo_map_image_page(encap_uof_obj, image,
					ae_uimage[j].page);
	}
	return j;
out_err:
	for (i = 0; i < j; i++)
		kfree(ae_uimage[i].page);
	return 0;
}

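/*
 * Record, for every enabled AE, which of the mapped images are assigned to
 * it. Fails if no image maps onto any enabled AE.
 */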
static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
{
	int i, ae;
	int mflag = 0;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned long ae_mask = handle->hal_handle->ae_mask;
	unsigned long cfg_ae_mask = handle->cfg_ae_mask;

	for_each_set_bit(ae, &ae_mask, max_ae) {
		if (!test_bit(ae, &cfg_ae_mask))
			continue;

		for (i = 0; i < obj_handle->uimage_num; i++) {
			unsigned long ae_assigned = obj_handle->ae_uimage[i].img_ptr->ae_assigned;

			if (!test_bit(ae, &ae_assigned))
				continue;
			mflag = 1;
			if (qat_uclo_init_ae_data(obj_handle, ae, i))
				return -EINVAL;
		}
	}
	if (!mflag) {
		pr_err("uimage uses an AE that is not enabled\n");
		return -EINVAL;
	}
	return 0;
}

static struct icp_qat_uof_strtable *
qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
		       char *tab_name, struct icp_qat_uof_strtable *str_table)
{
	struct icp_qat_uof_chunkhdr *chunk_hdr;

	chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)
					obj_hdr->file_buff, tab_name, NULL);
	if (chunk_hdr) {
		int hdr_size;

		memcpy(&str_table->table_len, obj_hdr->file_buff +
		       chunk_hdr->offset, sizeof(str_table->table_len));
		hdr_size = (char *)&str_table->strings - (char *)str_table;
		str_table->strings = (uintptr_t)obj_hdr->file_buff +
					chunk_hdr->offset + hdr_size;
		return str_table;
	}
	return NULL;
}

static void
qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
			   struct icp_qat_uclo_init_mem_table *init_mem_tab)
{
	struct icp_qat_uof_chunkhdr *chunk_hdr;

	chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
					ICP_QAT_UOF_IMEM, NULL);
	if (chunk_hdr) {
		memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof +
			chunk_hdr->offset, sizeof(unsigned int));
		init_mem_tab->init_mem = (struct icp_qat_uof_initmem *)
		(encap_uof_obj->beg_uof + chunk_hdr->offset +
		sizeof(unsigned int));
	}
}

static unsigned int
qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle)
{
	switch (handle->pci_dev->device) {
	case PCI_DEVICE_ID_INTEL_QAT_DH895XCC:
		return ICP_QAT_AC_895XCC_DEV_TYPE;
	case PCI_DEVICE_ID_INTEL_QAT_C62X:
		return ICP_QAT_AC_C62X_DEV_TYPE;
	case PCI_DEVICE_ID_INTEL_QAT_C3XXX:
		return ICP_QAT_AC_C3XXX_DEV_TYPE;
	case PCI_DEVICE_ID_INTEL_QAT_4XXX:
	case PCI_DEVICE_ID_INTEL_QAT_401XX:
	case PCI_DEVICE_ID_INTEL_QAT_402XX:
	case PCI_DEVICE_ID_INTEL_QAT_420XX:
		return ICP_QAT_AC_4XXX_A_DEV_TYPE;
	case PCI_DEVICE_ID_INTEL_QAT_6XXX:
		return ICP_QAT_AC_6XXX_DEV_TYPE;
	default:
		pr_err("unsupported device 0x%x\n", handle->pci_dev->device);
		return 0;
	}
}

static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
{
	unsigned int maj_ver, prod_type = obj_handle->prod_type;

	if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) {
		pr_err("UOF type 0x%x doesn't match with platform 0x%x\n",
		       obj_handle->encap_uof_obj.obj_hdr->ac_dev_type,
		       prod_type);
		return -EINVAL;
	}
	maj_ver = obj_handle->prod_rev & 0xff;
	if (obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver ||
	    obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver) {
		pr_err("UOF majVer 0x%x out of range\n", maj_ver);
		return -EINVAL;
	}
	return 0;
}

static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
			     unsigned char ae, unsigned char ctx_mask,
			     enum icp_qat_uof_regtype reg_type,
			     unsigned short reg_addr, unsigned int value)
{
	switch (reg_type) {
	case ICP_GPA_ABS:
	case ICP_GPB_ABS:
		ctx_mask = 0;
		fallthrough;
	case ICP_GPA_REL:
	case ICP_GPB_REL:
		return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
					reg_addr, value);
	case ICP_SR_ABS:
	case ICP_DR_ABS:
	case ICP_SR_RD_ABS:
	case ICP_DR_RD_ABS:
		ctx_mask = 0;
		fallthrough;
	case ICP_SR_REL:
	case ICP_DR_REL:
	case ICP_SR_RD_REL:
	case ICP_DR_RD_REL:
		return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type,
					    reg_addr, value);
	case ICP_SR_WR_ABS:
	case ICP_DR_WR_ABS:
		ctx_mask = 0;
		fallthrough;
	case ICP_SR_WR_REL:
	case ICP_DR_WR_REL:
		return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
					    reg_addr, value);
	case ICP_NEIGH_REL:
		return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
	default:
		pr_err("UOF uses unsupported reg type 0x%x\n", reg_type);
		return -EFAULT;
	}
	return 0;
}

static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
				 unsigned int ae,
				 struct icp_qat_uclo_encapme *encap_ae)
{
	unsigned int i;
	unsigned char ctx_mask;
	struct icp_qat_uof_init_regsym *init_regsym;

	if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) ==
	    ICP_QAT_UCLO_MAX_CTX)
		ctx_mask = 0xff;
	else
		ctx_mask = 0x55;

	for (i = 0; i < encap_ae->init_regsym_num; i++) {
		unsigned int exp_res;

		init_regsym = &encap_ae->init_regsym[i];
		exp_res = init_regsym->value;
		switch (init_regsym->init_type) {
		case ICP_QAT_UOF_INIT_REG:
			qat_uclo_init_reg(handle, ae, ctx_mask,
					  (enum icp_qat_uof_regtype)
					  init_regsym->reg_type,
					  (unsigned short)init_regsym->reg_addr,
					  exp_res);
			break;
		case ICP_QAT_UOF_INIT_REG_CTX:
			/* check if ctx is appropriate for the ctxMode */
			if (!((1 << init_regsym->ctx) & ctx_mask)) {
				pr_err("invalid ctx num = 0x%x\n", init_regsym->ctx);
				return -EINVAL;
			}
			qat_uclo_init_reg(handle, ae,
					  (unsigned char)
					  (1 << init_regsym->ctx),
					  (enum icp_qat_uof_regtype)
					  init_regsym->reg_type,
					  (unsigned short)init_regsym->reg_addr,
					  exp_res);
			break;
		case ICP_QAT_UOF_INIT_EXPR:
			pr_err("INIT_EXPR feature not supported\n");
			return -EINVAL;
		case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
			pr_err("INIT_EXPR_ENDIAN_SWAP feature not supported\n");
			return -EINVAL;
		default:
			break;
		}
	}
	return 0;
}

static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned long ae_mask = handle->hal_handle->ae_mask;
	struct icp_qat_uclo_aedata *aed;
	unsigned int s, ae;

	if (obj_handle->global_inited)
		return 0;
	if (obj_handle->init_mem_tab.entry_num) {
		if (qat_uclo_init_memory(handle)) {
			pr_err("initialize memory failed\n");
			return -EINVAL;
		}
	}

	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
		aed = &obj_handle->ae_data[ae];
		for (s = 0; s < aed->slice_num; s++) {
			if (!aed->ae_slices[s].encap_image)
				continue;
			if (qat_uclo_init_reg_sym(handle, ae, aed->ae_slices[s].encap_image))
				return -EINVAL;
		}
	}
	obj_handle->global_inited = 1;
	return 0;
}

static int qat_hal_set_modes(struct icp_qat_fw_loader_handle *handle,
			     struct icp_qat_uclo_objhandle *obj_handle,
			     unsigned char ae,
			     struct icp_qat_uof_image *uof_image)
{
	unsigned char mode;
	int ret;

	mode = ICP_QAT_CTX_MODE(uof_image->ae_mode);
	ret = qat_hal_set_ae_ctx_mode(handle, ae, mode);
	if (ret) {
		pr_err("qat_hal_set_ae_ctx_mode error\n");
		return ret;
	}
	if (handle->chip_info->nn) {
		mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
		ret = qat_hal_set_ae_nn_mode(handle, ae, mode);
		if (ret) {
			pr_err("qat_hal_set_ae_nn_mode error\n");
			return ret;
		}
	}
	mode = ICP_QAT_LOC_MEM0_MODE(uof_image->ae_mode);
	ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0, mode);
	if (ret) {
		pr_err("qat_hal_set_ae_lm_mode LMEM0 error\n");
		return ret;
	}
	mode = ICP_QAT_LOC_MEM1_MODE(uof_image->ae_mode);
	ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1, mode);
	if (ret) {
		pr_err("qat_hal_set_ae_lm_mode LMEM1 error\n");
		return ret;
	}
	if (handle->chip_info->lm2lm3) {
		mode = ICP_QAT_LOC_MEM2_MODE(uof_image->ae_mode);
		ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM2, mode);
		if (ret) {
			pr_err("qat_hal_set_ae_lm_mode LMEM2 error\n");
			return ret;
		}
		mode = ICP_QAT_LOC_MEM3_MODE(uof_image->ae_mode);
		ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM3, mode);
		if (ret) {
			pr_err("qat_hal_set_ae_lm_mode LMEM3 error\n");
			return ret;
		}
		mode = ICP_QAT_LOC_TINDEX_MODE(uof_image->ae_mode);
		qat_hal_set_ae_tindex_mode(handle, ae, mode);
	}
	return 0;
}

static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uof_image *uof_image;
	struct icp_qat_uclo_aedata *ae_data;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned long ae_mask = handle->hal_handle->ae_mask;
	unsigned long cfg_ae_mask = handle->cfg_ae_mask;
	unsigned char ae, s;
	int error;

	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
		if (!test_bit(ae, &cfg_ae_mask))
			continue;

		ae_data = &obj_handle->ae_data[ae];
		for (s = 0; s < min_t(unsigned int, ae_data->slice_num,
				      ICP_QAT_UCLO_MAX_CTX); s++) {
			if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
				continue;
			uof_image = ae_data->ae_slices[s].encap_image->img_ptr;
			error = qat_hal_set_modes(handle, obj_handle, ae,
						  uof_image);
			if (error)
				return error;
		}
	}
	return 0;
}

static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	struct icp_qat_uclo_encapme *image;
	int a;

	for (a = 0; a < obj_handle->uimage_num; a++) {
		image = &obj_handle->ae_uimage[a];
		image->uwords_num = image->page->beg_addr_p +
				    image->page->micro_words_num;
	}
}

static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int ae;

	obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
	obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
					     obj_handle->obj_hdr->file_buff;
	obj_handle->uword_in_bytes = 6;
	obj_handle->prod_type = qat_uclo_get_dev_type(handle);
	obj_handle->prod_rev = PID_MAJOR_REV |
			(PID_MINOR_REV & handle->hal_handle->revision_id);
	if (qat_uclo_check_uof_compat(obj_handle)) {
		pr_err("UOF incompatible\n");
		return -EINVAL;
	}
	obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(u64),
					GFP_KERNEL);
	if (!obj_handle->uword_buf)
		return -ENOMEM;
	obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE;
	if (!obj_handle->obj_hdr->file_buff ||
	    !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
				    &obj_handle->str_table)) {
		pr_err("UOF doesn't have effective images\n");
		goto out_err;
	}
	obj_handle->uimage_num =
		qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage,
				    ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX);
	if (!obj_handle->uimage_num)
		goto out_err;
	if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
		pr_err("Bad object\n");
		goto out_check_uof_aemask_err;
	}
	qat_uclo_init_uword_num(handle);
	qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj,
				   &obj_handle->init_mem_tab);
	if (qat_uclo_set_ae_mode(handle))
		goto out_check_uof_aemask_err;
	return 0;
out_check_uof_aemask_err:
	for (ae = 0; ae < obj_handle->uimage_num; ae++)
		kfree(obj_handle->ae_uimage[ae].page);
out_err:
	kfree(obj_handle->uword_buf);
	return -EFAULT;
}

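/*
 * Signed-image header lengths: hdr2sign is the portion preceding the signed
 * payload, hdr2cont the portion preceding the image content. The two differ
 * only for dual-signed images, whose content is additionally preceded by a
 * miscellaneous-info block.
 */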
static unsigned int qat_uclo_simg_hdr2sign_len(struct icp_qat_fw_loader_handle *handle)
{
	if (handle->chip_info->dual_sign)
		return ICP_QAT_DUALSIGN_OPAQUE_DATA_LEN;

	return ICP_QAT_AE_IMG_OFFSET(handle);
}

static unsigned int qat_uclo_simg_hdr2cont_len(struct icp_qat_fw_loader_handle *handle)
{
	if (handle->chip_info->dual_sign)
		return ICP_QAT_DUALSIGN_OPAQUE_DATA_LEN + ICP_QAT_DUALSIGN_MISC_INFO_LEN;

	return ICP_QAT_AE_IMG_OFFSET(handle);
}

static unsigned int qat_uclo_simg_fw_type(struct icp_qat_fw_loader_handle *handle, void *img_ptr)
{
	struct icp_qat_css_hdr *hdr = img_ptr;
	char *fw_hdr = img_ptr;
	unsigned int offset;

	if (handle->chip_info->dual_sign) {
		offset = qat_uclo_simg_hdr2sign_len(handle) + ICP_QAT_DUALSIGN_FW_TYPE_LEN;
		return *(fw_hdr + offset);
	}

	return hdr->fw_type;
}

static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle,
				      struct icp_qat_suof_filehdr *suof_ptr,
				      int suof_size)
{
	unsigned int check_sum = 0;
	unsigned int min_ver_offset = 0;
	struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;

	suof_handle->file_id = ICP_QAT_SUOF_FID;
	suof_handle->suof_buf = (char *)suof_ptr;
	suof_handle->suof_size = suof_size;
	min_ver_offset = suof_size - offsetof(struct icp_qat_suof_filehdr,
					      min_ver);
	check_sum = qat_uclo_calc_str_checksum((char *)&suof_ptr->min_ver,
					       min_ver_offset);
	if (check_sum != suof_ptr->check_sum) {
		pr_err("incorrect SUOF checksum\n");
		return -EINVAL;
	}
	suof_handle->check_sum = suof_ptr->check_sum;
	suof_handle->min_ver = suof_ptr->min_ver;
	suof_handle->maj_ver = suof_ptr->maj_ver;
	suof_handle->fw_type = suof_ptr->fw_type;
	return 0;
}

static void qat_uclo_map_simg(struct icp_qat_fw_loader_handle *handle,
			      struct icp_qat_suof_img_hdr *suof_img_hdr,
			      struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
{
	struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
	unsigned int offset = qat_uclo_simg_hdr2cont_len(handle);
	struct icp_qat_suof_objhdr *suof_objhdr;
	struct icp_qat_simg_ae_mode *ae_mode;

	suof_img_hdr->simg_buf = (suof_handle->suof_buf +
				  suof_chunk_hdr->offset +
				  sizeof(*suof_objhdr));
	suof_img_hdr->simg_len = ((struct icp_qat_suof_objhdr *)(uintptr_t)
				  (suof_handle->suof_buf +
				   suof_chunk_hdr->offset))->img_length;

	suof_img_hdr->css_header = suof_img_hdr->simg_buf;
	suof_img_hdr->css_simg = suof_img_hdr->css_header + offset;

	ae_mode = (struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg);
	suof_img_hdr->ae_mask = ae_mode->ae_mask;
	suof_img_hdr->simg_name = (unsigned long)&ae_mode->simg_name;
	suof_img_hdr->appmeta_data = (unsigned long)&ae_mode->appmeta_data;
	suof_img_hdr->fw_type = ae_mode->fw_type;
}

static void
qat_uclo_map_suof_symobjs(struct icp_qat_suof_handle *suof_handle,
			  struct icp_qat_suof_chunk_hdr *suof_chunk_hdr)
{
	char **sym_str = (char **)&suof_handle->sym_str;
	unsigned int *sym_size = &suof_handle->sym_size;
	struct icp_qat_suof_strtable *str_table_obj;

	*sym_size = *(unsigned int *)(uintptr_t)
		   (suof_chunk_hdr->offset + suof_handle->suof_buf);
	*sym_str = (char *)(uintptr_t)
		   (suof_handle->suof_buf + suof_chunk_hdr->offset +
		   sizeof(str_table_obj->tab_length));
}

static int qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle,
				      struct icp_qat_suof_img_hdr *img_hdr)
{
	struct icp_qat_simg_ae_mode *img_ae_mode = NULL;
	unsigned int prod_rev, maj_ver, prod_type;

	prod_type = qat_uclo_get_dev_type(handle);
	img_ae_mode = (struct icp_qat_simg_ae_mode *)img_hdr->css_simg;
	prod_rev = PID_MAJOR_REV |
			 (PID_MINOR_REV & handle->hal_handle->revision_id);
	if (img_ae_mode->dev_type != prod_type) {
		pr_err("incompatible product type %x\n", img_ae_mode->dev_type);
		return -EINVAL;
	}
	maj_ver = prod_rev & 0xff;
	if (maj_ver > img_ae_mode->devmax_ver ||
	    maj_ver < img_ae_mode->devmin_ver) {
		pr_err("incompatible device majver 0x%x\n", maj_ver);
		return -EINVAL;
	}
	return 0;
}

static void qat_uclo_del_suof(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;

	kfree(sobj_handle->img_table.simg_hdr);
	sobj_handle->img_table.simg_hdr = NULL;
	kfree(handle->sobj_handle);
	handle->sobj_handle = NULL;
}

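/* Swap the image at @img_id with the last entry so that it is loaded last. */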
static void qat_uclo_tail_img(struct icp_qat_suof_img_hdr *suof_img_hdr,
			      unsigned int img_id, unsigned int num_simgs)
{
	struct icp_qat_suof_img_hdr img_header;

	if (img_id != num_simgs - 1) {
		memcpy(&img_header, &suof_img_hdr[num_simgs - 1],
		       sizeof(*suof_img_hdr));
		memcpy(&suof_img_hdr[num_simgs - 1], &suof_img_hdr[img_id],
		       sizeof(*suof_img_hdr));
		memcpy(&suof_img_hdr[img_id], &img_header,
		       sizeof(*suof_img_hdr));
	}
}

static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle,
			     struct icp_qat_suof_filehdr *suof_ptr,
			     int suof_size)
{
	struct icp_qat_suof_handle *suof_handle = handle->sobj_handle;
	struct icp_qat_suof_chunk_hdr *suof_chunk_hdr = NULL;
	struct icp_qat_suof_img_hdr *suof_img_hdr = NULL;
	int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE;
	unsigned int i = 0;
	struct icp_qat_suof_img_hdr img_header;

	if (!suof_ptr || suof_size == 0) {
		pr_err("input parameter SUOF pointer/size is invalid\n");
		return -EINVAL;
	}
	if (qat_uclo_check_suof_format(suof_ptr))
		return -EINVAL;
	ret = qat_uclo_map_suof_file_hdr(handle, suof_ptr, suof_size);
	if (ret)
		return ret;
	suof_chunk_hdr = (struct icp_qat_suof_chunk_hdr *)
			 ((uintptr_t)suof_ptr + sizeof(*suof_ptr));

	qat_uclo_map_suof_symobjs(suof_handle, suof_chunk_hdr);
	suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1;

	if (suof_handle->img_table.num_simgs != 0) {
		suof_img_hdr = kcalloc(suof_handle->img_table.num_simgs,
				       sizeof(img_header),
				       GFP_KERNEL);
		if (!suof_img_hdr)
			return -ENOMEM;
		suof_handle->img_table.simg_hdr = suof_img_hdr;

		for (i = 0; i < suof_handle->img_table.num_simgs; i++) {
			qat_uclo_map_simg(handle, &suof_img_hdr[i],
					  &suof_chunk_hdr[1 + i]);
			ret = qat_uclo_check_simg_compat(handle,
							 &suof_img_hdr[i]);
			if (ret)
				return ret;
			suof_img_hdr[i].ae_mask &= handle->cfg_ae_mask;
			if ((suof_img_hdr[i].ae_mask & 0x1) != 0)
				ae0_img = i;
		}

		if (!handle->chip_info->tgroup_share_ustore) {
			qat_uclo_tail_img(suof_img_hdr, ae0_img,
					  suof_handle->img_table.num_simgs);
		}
	}
	return 0;
}

#define ADD_ADDR(high, low) ((((u64)high) << 32) + low)

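/*
 * Point the FCU at the DMA'd CSS header and issue the AUTH command, then
 * poll the status CSR until verification succeeds, fails or the retry
 * budget is exhausted.
 */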
static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle,
			    struct icp_qat_fw_auth_desc *desc)
{
	u32 fcu_sts, retry = 0;
	u32 fcu_ctl_csr, fcu_sts_csr;
	u32 fcu_dram_hi_csr, fcu_dram_lo_csr;
	u64 bus_addr;

	bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low)
			   - sizeof(struct icp_qat_auth_chunk);

	fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
	fcu_sts_csr = handle->chip_info->fcu_sts_csr;
	fcu_dram_hi_csr = handle->chip_info->fcu_dram_addr_hi;
	fcu_dram_lo_csr = handle->chip_info->fcu_dram_addr_lo;

	SET_CAP_CSR(handle, fcu_dram_hi_csr, bus_addr >> BITS_PER_TYPE(u32));
	SET_CAP_CSR(handle, fcu_dram_lo_csr, bus_addr);
	SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_AUTH);

	do {
		msleep(FW_AUTH_WAIT_PERIOD);
		fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
		if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL)
			goto auth_fail;
		if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1))
			if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE)
				return 0;
	} while (retry++ < FW_AUTH_MAX_RETRY);
auth_fail:
	pr_err("authentication error (FCU_STATUS = 0x%x), retry = %d\n",
	       fcu_sts & FCU_AUTH_STS_MASK, retry);
	return -EINVAL;
}

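/*
 * Broadcast load applies only to devices with a shared ustore and to images
 * that do not target the admin AE.
 */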
static bool qat_uclo_is_broadcast(struct icp_qat_fw_loader_handle *handle,
				  int imgid)
{
	struct icp_qat_suof_handle *sobj_handle;

	if (!handle->chip_info->tgroup_share_ustore)
		return false;

	sobj_handle = (struct icp_qat_suof_handle *)handle->sobj_handle;
	if (handle->hal_handle->admin_ae_mask &
	    sobj_handle->img_table.simg_hdr[imgid].ae_mask)
		return false;

	return true;
}

static int qat_uclo_broadcast_load_fw(struct icp_qat_fw_loader_handle *handle,
				      struct icp_qat_fw_auth_desc *desc)
{
	unsigned long ae_mask = handle->hal_handle->ae_mask;
	unsigned long desc_ae_mask = desc->ae_mask;
	u32 fcu_sts, ae_broadcast_mask = 0;
	u32 fcu_loaded_csr, ae_loaded;
	u32 fcu_sts_csr, fcu_ctl_csr;
	unsigned int ae, retry = 0;

	if (handle->chip_info->tgroup_share_ustore) {
		fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
		fcu_sts_csr = handle->chip_info->fcu_sts_csr;
		fcu_loaded_csr = handle->chip_info->fcu_loaded_ae_csr;
	} else {
		pr_err("Chip 0x%x doesn't support broadcast load\n", handle->pci_dev->device);
		return -EINVAL;
	}

	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
		if (qat_hal_check_ae_active(handle, (unsigned char)ae)) {
			pr_err("Broadcast load failed. AE is not enabled or active.\n");
			return -EINVAL;
		}

		if (test_bit(ae, &desc_ae_mask))
			ae_broadcast_mask |= 1 << ae;
	}

	if (ae_broadcast_mask) {
		SET_CAP_CSR(handle, FCU_ME_BROADCAST_MASK_TYPE,
			    ae_broadcast_mask);

		SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_LOAD);

		do {
			msleep(FW_AUTH_WAIT_PERIOD);
			fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
			fcu_sts &= FCU_AUTH_STS_MASK;

			if (fcu_sts == FCU_STS_LOAD_FAIL) {
				pr_err("Broadcast load failed: 0x%x\n", fcu_sts);
				return -EINVAL;
			} else if (fcu_sts == FCU_STS_LOAD_DONE) {
				ae_loaded = GET_CAP_CSR(handle, fcu_loaded_csr);
				ae_loaded >>= handle->chip_info->fcu_loaded_ae_pos;

				if ((ae_loaded & ae_broadcast_mask) == ae_broadcast_mask)
					break;
			}
		} while (retry++ < FW_AUTH_MAX_RETRY);

		if (retry > FW_AUTH_MAX_RETRY) {
			pr_err("broadcast load timed out, retry = %d\n", retry);
			return -EINVAL;
		}
	}
	return 0;
}

static int qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle *handle,
			       struct icp_firml_dram_desc *dram_desc,
			       unsigned int size)
{
	void *vptr;
	dma_addr_t ptr;

	vptr = dma_alloc_coherent(&handle->pci_dev->dev,
				  size, &ptr, GFP_KERNEL);
	if (!vptr)
		return -ENOMEM;
	dram_desc->dram_base_addr_v = vptr;
	dram_desc->dram_bus_addr = ptr;
	dram_desc->dram_size = size;
	return 0;
}

static void qat_uclo_simg_free(struct icp_qat_fw_loader_handle *handle,
			       struct icp_firml_dram_desc *dram_desc)
{
	if (handle && dram_desc && dram_desc->dram_base_addr_v) {
		dma_free_coherent(&handle->pci_dev->dev,
				  (size_t)(dram_desc->dram_size),
				  dram_desc->dram_base_addr_v,
				  dram_desc->dram_bus_addr);
	}

	if (dram_desc)
		memset(dram_desc, 0, sizeof(*dram_desc));
}

static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle,
				   struct icp_qat_fw_auth_desc **desc)
{
	struct icp_firml_dram_desc dram_desc;

	if (*desc) {
		dram_desc.dram_base_addr_v = *desc;
		dram_desc.dram_bus_addr = ((struct icp_qat_auth_chunk *)
					   (*desc))->chunk_bus_addr;
		dram_desc.dram_size = ((struct icp_qat_auth_chunk *)
				       (*desc))->chunk_size;
		qat_uclo_simg_free(handle, &dram_desc);
	}
}

static int qat_uclo_check_image(struct icp_qat_fw_loader_handle *handle,
				void *image, unsigned int size,
				unsigned int fw_type)
{
	char *fw_type_name = fw_type ? "MMP" : "AE";
	unsigned int css_dword_size = sizeof(u32);
	unsigned int header_len, simg_type;
	struct icp_qat_css_hdr *css_hdr;

	if (handle->chip_info->fw_auth) {
		header_len = qat_uclo_simg_hdr2sign_len(handle);
		simg_type = qat_uclo_simg_fw_type(handle, image);
		css_hdr = image;

		if (handle->chip_info->dual_sign) {
			if (css_hdr->module_type != ICP_QAT_DUALSIGN_MODULE_TYPE)
				goto err;
			if (css_hdr->header_len != ICP_QAT_DUALSIGN_HDR_LEN)
				goto err;
			if (css_hdr->header_ver != ICP_QAT_DUALSIGN_HDR_VER)
				goto err;
		} else {
			if (css_hdr->header_len * css_dword_size != header_len)
				goto err;
			if (css_hdr->size * css_dword_size != size)
				goto err;
			if (size <= header_len)
				goto err;
		}

		if (fw_type != simg_type)
			goto err;

		size -= header_len;
	}

	if (fw_type == CSS_AE_FIRMWARE) {
		if (size < sizeof(struct icp_qat_simg_ae_mode) +
		    ICP_QAT_SIMG_AE_INIT_SEQ_LEN)
			goto err;
		if (size > ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN)
			goto err;
	} else if (fw_type == CSS_MMP_FIRMWARE) {
		if (size > ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN)
			goto err;
	} else {
		pr_err("Unsupported firmware type\n");
		return -EINVAL;
	}
	return 0;

err:
	pr_err("Invalid %s firmware image\n", fw_type_name);
	return -EINVAL;
}

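/*
 * Lay out the RSA authentication descriptor in DMA memory: CSS header, FWSK
 * public key (modulus, padding, exponent), signature, then the signed
 * payload, recording the bus address of each piece as it is placed.
 */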
static int qat_uclo_build_auth_desc_RSA(struct icp_qat_fw_loader_handle *handle,
					char *image, unsigned int size,
					struct icp_firml_dram_desc *dram_desc,
					unsigned int fw_type, struct icp_qat_fw_auth_desc **desc)
{
	struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image;
	struct icp_qat_simg_ae_mode *simg_ae_mode;
	struct icp_qat_fw_auth_desc *auth_desc;
	char *virt_addr, *virt_base;
	u64 bus_addr;

	virt_base = dram_desc->dram_base_addr_v;
	virt_base += sizeof(struct icp_qat_auth_chunk);
	bus_addr = dram_desc->dram_bus_addr + sizeof(struct icp_qat_auth_chunk);
	auth_desc = dram_desc->dram_base_addr_v;
	auth_desc->css_hdr_high = upper_32_bits(bus_addr);
	auth_desc->css_hdr_low = lower_32_bits(bus_addr);
	virt_addr = virt_base;

	memcpy(virt_addr, image, sizeof(*css_hdr));
	/* pub key */
	bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) +
			   sizeof(*css_hdr);
	virt_addr = virt_addr + sizeof(*css_hdr);

	auth_desc->fwsk_pub_high = upper_32_bits(bus_addr);
	auth_desc->fwsk_pub_low = lower_32_bits(bus_addr);

	memcpy(virt_addr, image + sizeof(*css_hdr), ICP_QAT_CSS_FWSK_MODULUS_LEN(handle));
	/* padding */
	memset((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)),
	       0, ICP_QAT_CSS_FWSK_PAD_LEN(handle));

	/* exponent */
	memcpy(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
	       ICP_QAT_CSS_FWSK_PAD_LEN(handle), image + sizeof(*css_hdr) +
	       ICP_QAT_CSS_FWSK_MODULUS_LEN(handle), sizeof(unsigned int));

	/* signature */
	bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high,
			    auth_desc->fwsk_pub_low) +
		   ICP_QAT_CSS_FWSK_PUB_LEN(handle);
	virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN(handle);
	auth_desc->signature_high = upper_32_bits(bus_addr);
	auth_desc->signature_low = lower_32_bits(bus_addr);

	memcpy(virt_addr, image + sizeof(*css_hdr) + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) +
	       ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle), ICP_QAT_CSS_SIGNATURE_LEN(handle));

	bus_addr = ADD_ADDR(auth_desc->signature_high,
			    auth_desc->signature_low) +
		   ICP_QAT_CSS_SIGNATURE_LEN(handle);
	virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle);

	auth_desc->img_high = upper_32_bits(bus_addr);
	auth_desc->img_low = lower_32_bits(bus_addr);
	auth_desc->img_len = size - qat_uclo_simg_hdr2sign_len(handle);
	if (bus_addr + auth_desc->img_len >
	    dram_desc->dram_bus_addr + ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN) {
		pr_err("insufficient memory size for authentication data\n");
		qat_uclo_simg_free(handle, dram_desc);
		return -ENOMEM;
	}

	memcpy(virt_addr, image + qat_uclo_simg_hdr2sign_len(handle), auth_desc->img_len);
	virt_addr = virt_base;
	/* AE firmware */
	if (fw_type == CSS_AE_FIRMWARE) {
		auth_desc->img_ae_mode_data_high = auth_desc->img_high;
		auth_desc->img_ae_mode_data_low = auth_desc->img_low;
		bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high,
				    auth_desc->img_ae_mode_data_low) +
			   sizeof(struct icp_qat_simg_ae_mode);

		auth_desc->img_ae_init_data_high = upper_32_bits(bus_addr);
		auth_desc->img_ae_init_data_low = lower_32_bits(bus_addr);
		bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
		auth_desc->img_ae_insts_high = upper_32_bits(bus_addr);
		auth_desc->img_ae_insts_low = lower_32_bits(bus_addr);
		virt_addr += sizeof(struct icp_qat_css_hdr);
		virt_addr += ICP_QAT_CSS_FWSK_PUB_LEN(handle);
		virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle);
		simg_ae_mode = (struct icp_qat_simg_ae_mode *)(uintptr_t)virt_addr;
		auth_desc->ae_mask = simg_ae_mode->ae_mask & handle->cfg_ae_mask;
	} else {
		auth_desc->img_ae_insts_high = auth_desc->img_high;
		auth_desc->img_ae_insts_low = auth_desc->img_low;
	}
	*desc = auth_desc;
	return 0;
}

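/*
 * As above, but for dual-signed (RSA + XMSS) images: the RSA key and
 * signature are followed by the XMSS public key and signature before the
 * signed payload.
 */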
static int qat_uclo_build_auth_desc_dualsign(struct icp_qat_fw_loader_handle *handle,
					     char *image, unsigned int size,
					     struct icp_firml_dram_desc *dram_desc,
					     unsigned int fw_type,
					     struct icp_qat_fw_auth_desc **desc)
{
	struct icp_qat_simg_ae_mode *simg_ae_mode;
	struct icp_qat_fw_auth_desc *auth_desc;
	unsigned int chunk_offset, img_offset;
	u64 bus_addr, addr;
	char *virt_addr;

	virt_addr = dram_desc->dram_base_addr_v;
	virt_addr += sizeof(struct icp_qat_auth_chunk);
	bus_addr = dram_desc->dram_bus_addr + sizeof(struct icp_qat_auth_chunk);

	auth_desc = dram_desc->dram_base_addr_v;
	auth_desc->img_len = size - qat_uclo_simg_hdr2sign_len(handle);
	auth_desc->css_hdr_high = upper_32_bits(bus_addr);
	auth_desc->css_hdr_low = lower_32_bits(bus_addr);
	memcpy(virt_addr, image, ICP_QAT_DUALSIGN_OPAQUE_HDR_LEN);

	img_offset = ICP_QAT_DUALSIGN_OPAQUE_HDR_LEN;
	chunk_offset = ICP_QAT_DUALSIGN_OPAQUE_HDR_ALIGN_LEN;

	/* RSA pub key */
	addr = bus_addr + chunk_offset;
	auth_desc->fwsk_pub_high = upper_32_bits(addr);
	auth_desc->fwsk_pub_low = lower_32_bits(addr);
	memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_CSS_FWSK_MODULUS_LEN(handle));

	img_offset += ICP_QAT_CSS_FWSK_MODULUS_LEN(handle);
	chunk_offset += ICP_QAT_CSS_FWSK_MODULUS_LEN(handle);
	/* RSA padding */
	memset(virt_addr + chunk_offset, 0, ICP_QAT_CSS_FWSK_PAD_LEN(handle));

	chunk_offset += ICP_QAT_CSS_FWSK_PAD_LEN(handle);
	/* RSA exponent */
	memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle));

	img_offset += ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle);
	chunk_offset += ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle);
	/* RSA signature */
	addr = bus_addr + chunk_offset;
	auth_desc->signature_high = upper_32_bits(addr);
	auth_desc->signature_low = lower_32_bits(addr);
	memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_CSS_SIGNATURE_LEN(handle));

	img_offset += ICP_QAT_CSS_SIGNATURE_LEN(handle);
	chunk_offset += ICP_QAT_CSS_SIGNATURE_LEN(handle);
	/* XMSS pubkey */
	addr = bus_addr + chunk_offset;
	auth_desc->xmss_pubkey_high = upper_32_bits(addr);
	auth_desc->xmss_pubkey_low = lower_32_bits(addr);
	memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_DUALSIGN_XMSS_PUBKEY_LEN);

	img_offset += ICP_QAT_DUALSIGN_XMSS_PUBKEY_LEN;
	chunk_offset += ICP_QAT_DUALSIGN_XMSS_PUBKEY_LEN;
	/* XMSS signature */
	addr = bus_addr + chunk_offset;
	auth_desc->xmss_sig_high = upper_32_bits(addr);
	auth_desc->xmss_sig_low = lower_32_bits(addr);
	memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_DUALSIGN_XMSS_SIG_LEN);

	img_offset += ICP_QAT_DUALSIGN_XMSS_SIG_LEN;
	chunk_offset += ICP_QAT_DUALSIGN_XMSS_SIG_ALIGN_LEN;

	if (dram_desc->dram_size < (chunk_offset + auth_desc->img_len)) {
		pr_err("insufficient auth chunk memory to store image data\n");
		return -ENOMEM;
	}

	/* Signed data */
	addr = bus_addr + chunk_offset;
	auth_desc->img_high = upper_32_bits(addr);
	auth_desc->img_low = lower_32_bits(addr);
	memcpy(virt_addr + chunk_offset, image + img_offset, auth_desc->img_len);

	chunk_offset += ICP_QAT_DUALSIGN_MISC_INFO_LEN;
	/* AE firmware */
	if (fw_type == CSS_AE_FIRMWARE) {
		/* AE mode data */
		addr = bus_addr + chunk_offset;
		auth_desc->img_ae_mode_data_high = upper_32_bits(addr);
		auth_desc->img_ae_mode_data_low = lower_32_bits(addr);
		simg_ae_mode =
			(struct icp_qat_simg_ae_mode *)(virt_addr + chunk_offset);
		auth_desc->ae_mask = simg_ae_mode->ae_mask & handle->cfg_ae_mask;

		chunk_offset += sizeof(struct icp_qat_simg_ae_mode);
		/* AE init seq */
		addr = bus_addr + chunk_offset;
		auth_desc->img_ae_init_data_high = upper_32_bits(addr);
		auth_desc->img_ae_init_data_low = lower_32_bits(addr);

		chunk_offset += ICP_QAT_SIMG_AE_INIT_SEQ_LEN;
		/* AE instructions */
		addr = bus_addr + chunk_offset;
		auth_desc->img_ae_insts_high = upper_32_bits(addr);
		auth_desc->img_ae_insts_low = lower_32_bits(addr);
	} else {
		addr = bus_addr + chunk_offset;
		auth_desc->img_ae_insts_high = upper_32_bits(addr);
		auth_desc->img_ae_insts_low = lower_32_bits(addr);
	}
	*desc = auth_desc;
	return 0;
}

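/*
 * Allocate the DMA-able authentication chunk for a signed image and build
 * the matching authentication descriptor, dispatching to the dual-sign or
 * RSA-only variant depending on the chip's signing scheme.
 */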
static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle,
				char *image, unsigned int size,
				struct icp_qat_fw_auth_desc **desc)
{
	struct icp_qat_auth_chunk *auth_chunk;
	struct icp_firml_dram_desc img_desc;
	unsigned int simg_fw_type;
	int ret;

	ret = qat_uclo_simg_alloc(handle, &img_desc, ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN);
	if (ret)
		return ret;

	simg_fw_type = qat_uclo_simg_fw_type(handle, image);
	auth_chunk = img_desc.dram_base_addr_v;
	auth_chunk->chunk_size = img_desc.dram_size;
	auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr;

	if (handle->chip_info->dual_sign)
		return qat_uclo_build_auth_desc_dualsign(handle, image, size, &img_desc,
							 simg_fw_type, desc);

	return qat_uclo_build_auth_desc_RSA(handle, image, size, &img_desc,
					    simg_fw_type, desc);
}

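/*
 * Load an authenticated firmware image onto every accel engine (AE)
 * selected in the descriptor's ae_mask by issuing an FCU LOAD command per
 * AE and polling the FCU status CSR until the AE reports loaded or the
 * retry budget is exhausted.
 */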
static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle,
			    struct icp_qat_fw_auth_desc *desc)
{
	unsigned long ae_mask = handle->hal_handle->ae_mask;
	u32 fcu_sts_csr, fcu_ctl_csr;
	u32 loaded_aes, loaded_csr;
	unsigned int i;
	u32 fcu_sts;

	fcu_ctl_csr = handle->chip_info->fcu_ctl_csr;
	fcu_sts_csr = handle->chip_info->fcu_sts_csr;
	loaded_csr = handle->chip_info->fcu_loaded_ae_csr;

	for_each_set_bit(i, &ae_mask, handle->hal_handle->ae_max_num) {
		int retry = 0;

		if (!((desc->ae_mask >> i) & 0x1))
			continue;
		if (qat_hal_check_ae_active(handle, i)) {
			pr_err("AE %d is active\n", i);
			return -EINVAL;
		}
		SET_CAP_CSR(handle, fcu_ctl_csr,
			    (FCU_CTRL_CMD_LOAD |
			     (1 << FCU_CTRL_BROADCAST_POS) |
			     (i << FCU_CTRL_AE_POS)));

		do {
			msleep(FW_AUTH_WAIT_PERIOD);
			fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr);
			if ((fcu_sts & FCU_AUTH_STS_MASK) ==
			    FCU_STS_LOAD_DONE) {
				loaded_aes = GET_CAP_CSR(handle, loaded_csr);
				loaded_aes >>= handle->chip_info->fcu_loaded_ae_pos;
				if (loaded_aes & (1 << i))
					break;
			}
		} while (retry++ < FW_AUTH_MAX_RETRY);
		if (retry > FW_AUTH_MAX_RETRY) {
			pr_err("firmware load timed out after %d retries\n",
			       retry);
			return -EINVAL;
		}
	}
	return 0;
}

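/* Allocate a SUOF handle and map the SUOF image into it. */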
static int qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle,
				 void *addr_ptr, int mem_size)
{
	struct icp_qat_suof_handle *suof_handle;

	suof_handle = kzalloc(sizeof(*suof_handle), GFP_KERNEL);
	if (!suof_handle)
		return -ENOMEM;
	handle->sobj_handle = suof_handle;
	if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) {
		qat_uclo_del_suof(handle);
		pr_err("map SUOF failed\n");
		return -EINVAL;
	}
	return 0;
}

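/*
 * Write the MMP firmware image. On devices with firmware authentication
 * the image is mapped, authenticated and then unmapped; otherwise it is
 * written directly into SRAM after a size check.
 */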
int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle,
		       void *addr_ptr, int mem_size)
{
	struct icp_qat_fw_auth_desc *desc = NULL;
	int status = 0;
	int ret;

	ret = qat_uclo_check_image(handle, addr_ptr, mem_size, CSS_MMP_FIRMWARE);
	if (ret)
		return ret;

	if (handle->chip_info->fw_auth) {
		status = qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc);
		if (!status)
			status = qat_uclo_auth_fw(handle, desc);
		qat_uclo_ummap_auth_fw(handle, &desc);
	} else {
		if (handle->chip_info->mmp_sram_size < mem_size) {
			pr_err("MMP size is too large: 0x%x\n", mem_size);
			return -EFBIG;
		}
		qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, mem_size);
	}
	return status;
}

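/*
 * Duplicate the UOF buffer, validate its file header and map the object
 * chunk, then parse the UOF object into the loader's object handle.
 */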
static int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
				void *addr_ptr, int mem_size)
{
	struct icp_qat_uof_filehdr *filehdr;
	struct icp_qat_uclo_objhandle *objhdl;

	objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL);
	if (!objhdl)
		return -ENOMEM;
	objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL);
	if (!objhdl->obj_buf)
		goto out_objbuf_err;
	filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
	if (qat_uclo_check_uof_format(filehdr))
		goto out_objhdr_err;
	objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
					     ICP_QAT_UOF_OBJS);
	if (!objhdl->obj_hdr) {
		pr_err("object file chunk is null\n");
		goto out_objhdr_err;
	}
	handle->obj_handle = objhdl;
	if (qat_uclo_parse_uof_obj(handle))
		goto out_overlay_obj_err;
	return 0;

out_overlay_obj_err:
	handle->obj_handle = NULL;
	kfree(objhdl->obj_hdr);
out_objhdr_err:
	kfree(objhdl->obj_buf);
out_objbuf_err:
	kfree(objhdl);
	return -ENOMEM;
}

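/*
 * Record the MOF buffer in the MOF handle and verify the file checksum,
 * which covers everything from the min_ver field to the end of the file.
 */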
static int qat_uclo_map_mof_file_hdr(struct icp_qat_fw_loader_handle *handle,
				     struct icp_qat_mof_file_hdr *mof_ptr,
				     u32 mof_size)
{
	struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle;
	unsigned int min_ver_offset;
	unsigned int checksum;

	mobj_handle->file_id = ICP_QAT_MOF_FID;
	mobj_handle->mof_buf = (char *)mof_ptr;
	mobj_handle->mof_size = mof_size;

	min_ver_offset = mof_size - offsetof(struct icp_qat_mof_file_hdr,
					     min_ver);
	checksum = qat_uclo_calc_str_checksum(&mof_ptr->min_ver,
					      min_ver_offset);
	if (checksum != mof_ptr->checksum) {
		pr_err("incorrect MOF checksum\n");
		return -EINVAL;
	}

	mobj_handle->checksum = mof_ptr->checksum;
	mobj_handle->min_ver = mof_ptr->min_ver;
	mobj_handle->maj_ver = mof_ptr->maj_ver;
	return 0;
}

static void qat_uclo_del_mof(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle;

	kfree(mobj_handle->obj_table.obj_hdr);
	mobj_handle->obj_table.obj_hdr = NULL;
	kfree(handle->mobj_handle);
	handle->mobj_handle = NULL;
}

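/* Look up a named UOF/SUOF object in the MOF object table. */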
static int qat_uclo_seek_obj_inside_mof(struct icp_qat_mof_handle *mobj_handle,
					const char *obj_name, char **obj_ptr,
					unsigned int *obj_size)
{
	struct icp_qat_mof_objhdr *obj_hdr = mobj_handle->obj_table.obj_hdr;
	unsigned int i;

	for (i = 0; i < mobj_handle->obj_table.num_objs; i++) {
		if (!strncmp(obj_hdr[i].obj_name, obj_name,
			     ICP_QAT_SUOF_OBJ_NAME_LEN)) {
			*obj_ptr = obj_hdr[i].obj_buf;
			*obj_size = obj_hdr[i].obj_size;
			return 0;
		}
	}

	pr_err("object %s not found inside MOF\n", obj_name);
	return -EINVAL;
}

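/*
 * Fill one MOF object table entry from an object chunk header, resolving
 * the chunk offset against the UOF or SUOF objects region according to
 * the chunk id.
 */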
static int qat_uclo_map_obj_from_mof(struct icp_qat_mof_handle *mobj_handle,
				     struct icp_qat_mof_objhdr *mobj_hdr,
				     struct icp_qat_mof_obj_chunkhdr *obj_chunkhdr)
{
	u8 *obj;

	if (!strncmp(obj_chunkhdr->chunk_id, ICP_QAT_UOF_IMAG,
		     ICP_QAT_MOF_OBJ_CHUNKID_LEN)) {
		obj = mobj_handle->uobjs_hdr + obj_chunkhdr->offset;
	} else if (!strncmp(obj_chunkhdr->chunk_id, ICP_QAT_SUOF_IMAG,
			    ICP_QAT_MOF_OBJ_CHUNKID_LEN)) {
		obj = mobj_handle->sobjs_hdr + obj_chunkhdr->offset;
	} else {
		pr_err("unsupported chunk id\n");
		return -EINVAL;
	}
	mobj_hdr->obj_buf = obj;
	mobj_hdr->obj_size = (unsigned int)obj_chunkhdr->size;
	mobj_hdr->obj_name = obj_chunkhdr->name + mobj_handle->sym_str;
	return 0;
}

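/*
 * Build the MOF object table: allocate one entry per UOF and SUOF chunk,
 * map each chunk into it, and verify that every chunk was accounted for.
 */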
static int qat_uclo_map_objs_from_mof(struct icp_qat_mof_handle *mobj_handle)
{
	struct icp_qat_mof_obj_chunkhdr *uobj_chunkhdr;
	struct icp_qat_mof_obj_chunkhdr *sobj_chunkhdr;
	struct icp_qat_mof_obj_hdr *uobj_hdr;
	struct icp_qat_mof_obj_hdr *sobj_hdr;
	struct icp_qat_mof_objhdr *mobj_hdr;
	unsigned int uobj_chunk_num = 0;
	unsigned int sobj_chunk_num = 0;
	unsigned int *valid_chunk;
	int ret, i;

	uobj_hdr = (struct icp_qat_mof_obj_hdr *)mobj_handle->uobjs_hdr;
	sobj_hdr = (struct icp_qat_mof_obj_hdr *)mobj_handle->sobjs_hdr;
	if (uobj_hdr)
		uobj_chunk_num = uobj_hdr->num_chunks;
	if (sobj_hdr)
		sobj_chunk_num = sobj_hdr->num_chunks;

	mobj_hdr = kcalloc(size_add(uobj_chunk_num, sobj_chunk_num),
			   sizeof(*mobj_hdr), GFP_KERNEL);
	if (!mobj_hdr)
		return -ENOMEM;

	mobj_handle->obj_table.obj_hdr = mobj_hdr;
	valid_chunk = &mobj_handle->obj_table.num_objs;
	uobj_chunkhdr = (struct icp_qat_mof_obj_chunkhdr *)
			((uintptr_t)uobj_hdr + sizeof(*uobj_hdr));
	sobj_chunkhdr = (struct icp_qat_mof_obj_chunkhdr *)
			((uintptr_t)sobj_hdr + sizeof(*sobj_hdr));

	/* map uof objects */
	for (i = 0; i < uobj_chunk_num; i++) {
		ret = qat_uclo_map_obj_from_mof(mobj_handle,
						&mobj_hdr[*valid_chunk],
						&uobj_chunkhdr[i]);
		if (ret)
			return ret;
		(*valid_chunk)++;
	}

	/* map suof objects */
	for (i = 0; i < sobj_chunk_num; i++) {
		ret = qat_uclo_map_obj_from_mof(mobj_handle,
						&mobj_hdr[*valid_chunk],
						&sobj_chunkhdr[i]);
		if (ret)
			return ret;
		(*valid_chunk)++;
	}

	if ((uobj_chunk_num + sobj_chunk_num) != *valid_chunk) {
		pr_err("inconsistent UOF/SUOF chunk count\n");
		return -EINVAL;
	}
	return 0;
}

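/*
 * Locate the MOF symbol string table: the chunk starts with its length
 * (tab_len), immediately followed by the string data.
 */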
static void qat_uclo_map_mof_symobjs(struct icp_qat_mof_handle *mobj_handle,
				     struct icp_qat_mof_chunkhdr *mof_chunkhdr)
{
	char **sym_str = (char **)&mobj_handle->sym_str;
	unsigned int *sym_size = &mobj_handle->sym_size;
	struct icp_qat_mof_str_table *str_table_obj;

	*sym_size = *(unsigned int *)(uintptr_t)
		    (mof_chunkhdr->offset + mobj_handle->mof_buf);
	*sym_str = (char *)(uintptr_t)
		   (mobj_handle->mof_buf + mof_chunkhdr->offset +
		    sizeof(str_table_obj->tab_len));
}

static void qat_uclo_map_mof_chunk(struct icp_qat_mof_handle *mobj_handle,
				   struct icp_qat_mof_chunkhdr *mof_chunkhdr)
{
	char *chunk_id = mof_chunkhdr->chunk_id;

	if (!strncmp(chunk_id, ICP_QAT_MOF_SYM_OBJS, ICP_QAT_MOF_OBJ_ID_LEN))
		qat_uclo_map_mof_symobjs(mobj_handle, mof_chunkhdr);
	else if (!strncmp(chunk_id, ICP_QAT_UOF_OBJS, ICP_QAT_MOF_OBJ_ID_LEN))
		mobj_handle->uobjs_hdr = mobj_handle->mof_buf +
					 mof_chunkhdr->offset;
	else if (!strncmp(chunk_id, ICP_QAT_SUOF_OBJS, ICP_QAT_MOF_OBJ_ID_LEN))
		mobj_handle->sobjs_hdr = mobj_handle->mof_buf +
					 mof_chunkhdr->offset;
}

static int qat_uclo_check_mof_format(struct icp_qat_mof_file_hdr *mof_hdr)
{
	int maj = mof_hdr->maj_ver & 0xff;
	int min = mof_hdr->min_ver & 0xff;

	if (mof_hdr->file_id != ICP_QAT_MOF_FID) {
		pr_err("invalid header 0x%x\n", mof_hdr->file_id);
		return -EINVAL;
	}

	if (mof_hdr->num_chunks <= 0x1) {
		pr_err("incorrect MOF chunk count\n");
		return -EINVAL;
	}
	if (maj != ICP_QAT_MOF_MAJVER || min != ICP_QAT_MOF_MINVER) {
		pr_err("bad MOF version, major 0x%x, minor 0x%x\n", maj, min);
		return -EINVAL;
	}
	return 0;
}

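/*
 * Map a MOF file and locate the named object inside it. Plain UOF and
 * SUOF buffers are passed through unchanged; otherwise the MOF format is
 * validated, its chunks are parsed and the object table is built before
 * seeking the requested object.
 */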
static int qat_uclo_map_mof_obj(struct icp_qat_fw_loader_handle *handle,
				struct icp_qat_mof_file_hdr *mof_ptr,
				u32 mof_size, const char *obj_name,
				char **obj_ptr, unsigned int *obj_size)
{
	struct icp_qat_mof_chunkhdr *mof_chunkhdr;
	unsigned int file_id = mof_ptr->file_id;
	struct icp_qat_mof_handle *mobj_handle;
	unsigned short chunks_num;
	unsigned int i;
	int ret;

	if (file_id == ICP_QAT_UOF_FID || file_id == ICP_QAT_SUOF_FID) {
		if (obj_ptr)
			*obj_ptr = (char *)mof_ptr;
		if (obj_size)
			*obj_size = mof_size;
		return 0;
	}
	if (qat_uclo_check_mof_format(mof_ptr))
		return -EINVAL;

	mobj_handle = kzalloc(sizeof(*mobj_handle), GFP_KERNEL);
	if (!mobj_handle)
		return -ENOMEM;

	handle->mobj_handle = mobj_handle;
	ret = qat_uclo_map_mof_file_hdr(handle, mof_ptr, mof_size);
	if (ret)
		return ret;

	mof_chunkhdr = (void *)mof_ptr + sizeof(*mof_ptr);
	chunks_num = mof_ptr->num_chunks;

	/* Parse MOF file chunks */
	for (i = 0; i < chunks_num; i++)
		qat_uclo_map_mof_chunk(mobj_handle, &mof_chunkhdr[i]);

	/* The symbol table and at least one of uobjs/sobjs must be present */
	if (!mobj_handle->sym_str ||
	    (!mobj_handle->uobjs_hdr && !mobj_handle->sobjs_hdr))
		return -EINVAL;

	ret = qat_uclo_map_objs_from_mof(mobj_handle);
	if (ret)
		return ret;

	/* Seek specified uof object in MOF */
	return qat_uclo_seek_obj_inside_mof(mobj_handle, obj_name,
					    obj_ptr, obj_size);
}

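/*
 * qat_uclo_map_obj() - map a firmware object for later writing to the AEs
 *
 * If @obj_name is given, the object is first looked up inside a MOF
 * container; otherwise the whole buffer is treated as the object. Devices
 * with firmware authentication map the object as SUOF, legacy devices as
 * UOF. Buffers shorter than the smallest valid file header (24 bytes) are
 * rejected.
 *
 * A minimal caller sketch (an assumption for illustration, not a verbatim
 * caller; it presumes the loader handle is already set up and @fw holds
 * the firmware blob):
 *
 *	ret = qat_uclo_map_obj(handle, (void *)fw->data, fw->size, obj_name);
 *	if (!ret) {
 *		ret = qat_uclo_wr_all_uimage(handle);
 *		qat_uclo_del_obj(handle);
 *	}
 */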
int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle,
		     void *addr_ptr, u32 mem_size, const char *obj_name)
{
	char *obj_addr;
	u32 obj_size;
	int ret;

	BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
		     (sizeof(handle->hal_handle->ae_mask) * 8));

	if (!handle || !addr_ptr || mem_size < 24)
		return -EINVAL;

	if (obj_name) {
		ret = qat_uclo_map_mof_obj(handle, addr_ptr, mem_size, obj_name,
					   &obj_addr, &obj_size);
		if (ret)
			return ret;
	} else {
		obj_addr = addr_ptr;
		obj_size = mem_size;
	}

	return (handle->chip_info->fw_auth) ?
	       qat_uclo_map_suof_obj(handle, obj_addr, obj_size) :
	       qat_uclo_map_uof_obj(handle, obj_addr, obj_size);
}

void qat_uclo_del_obj(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int a;

	if (handle->mobj_handle)
		qat_uclo_del_mof(handle);
	if (handle->sobj_handle)
		qat_uclo_del_suof(handle);
	if (!obj_handle)
		return;

	kfree(obj_handle->uword_buf);
	for (a = 0; a < obj_handle->uimage_num; a++)
		kfree(obj_handle->ae_uimage[a].page);

	for (a = 0; a < handle->hal_handle->ae_max_num; a++)
		qat_uclo_free_ae_data(&obj_handle->ae_data[a]);

	kfree(obj_handle->obj_hdr);
	kfree(obj_handle->obj_buf);
	kfree(obj_handle);
	handle->obj_handle = NULL;
}

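/*
 * Fetch the microword at physical address @addr_p (or relative address
 * @raddr for paged regions) from the encapsulated page's uword blocks,
 * masking it to the 44 valid bits. A missing page or a word equal to
 * INVLD_UWORD yields the @fill pattern instead.
 */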
static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
				 struct icp_qat_uclo_encap_page *encap_page,
				 u64 *uword, unsigned int addr_p,
				 unsigned int raddr, u64 fill)
{
	unsigned int i, addr;
	u64 uwrd = 0;

	if (!encap_page) {
		*uword = fill;
		return;
	}
	addr = (encap_page->page_region) ? raddr : addr_p;
	for (i = 0; i < encap_page->uwblock_num; i++) {
		if (addr >= encap_page->uwblock[i].start_addr &&
		    addr <= encap_page->uwblock[i].start_addr +
			    encap_page->uwblock[i].words_num - 1) {
			addr -= encap_page->uwblock[i].start_addr;
			addr *= obj_handle->uword_in_bytes;
			memcpy(&uwrd, (void *)(((uintptr_t)
			       encap_page->uwblock[i].micro_words) + addr),
			       obj_handle->uword_in_bytes);
			uwrd = uwrd & GENMASK_ULL(43, 0);
		}
	}
	*uword = uwrd;
	if (*uword == INVLD_UWORD)
		*uword = fill;
}

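/*
 * Write one encapsulated page to an AE's ustore, in chunks of at most
 * UWORD_CPYBUF_SIZE microwords: each chunk is expanded into the uword
 * buffer (gaps take the image's fill pattern) and then copied to ustore.
 */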
static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
					struct icp_qat_uclo_encap_page
					*encap_page, unsigned int ae)
{
	unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	u64 fill_pat;

	/*
	 * Load the page starting at the appropriate ustore address; the
	 * fill pattern is taken from the first image, as it is the same
	 * for all of them.
	 */
	memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern,
	       sizeof(u64));
	uw_physical_addr = encap_page->beg_addr_p;
	uw_relative_addr = 0;
	words_num = encap_page->micro_words_num;
	while (words_num) {
		cpylen = min(words_num, UWORD_CPYBUF_SIZE);

		/* load the buffer */
		for (i = 0; i < cpylen; i++)
			qat_uclo_fill_uwords(obj_handle, encap_page,
					     &obj_handle->uword_buf[i],
					     uw_physical_addr + i,
					     uw_relative_addr + i, fill_pat);

		/* copy the buffer to ustore */
		qat_hal_wr_uwords(handle, (unsigned char)ae,
				  uw_physical_addr, cpylen,
				  obj_handle->uword_buf);

		uw_physical_addr += cpylen;
		uw_relative_addr += cpylen;
		words_num -= cpylen;
	}
}

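/*
 * Write a UOF image to every AE it is assigned to: pick the slice that
 * owns the image's contexts, write its default page to ustore, set the
 * per-context current page (ctx_mask selects all eight contexts in
 * eight-context mode, the even ones otherwise), and point the assigned
 * contexts' PCs at the image entry address.
 */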
static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
				    struct icp_qat_uof_image *image)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned long ae_mask = handle->hal_handle->ae_mask;
	unsigned long cfg_ae_mask = handle->cfg_ae_mask;
	unsigned long ae_assigned = image->ae_assigned;
	struct icp_qat_uclo_aedata *aed;
	unsigned int ctx_mask, s;
	struct icp_qat_uclo_page *page;
	unsigned char ae;
	int ctx;

	if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
		ctx_mask = 0xff;
	else
		ctx_mask = 0x55;
	/*
	 * Load the default page and set the assigned CTX PC
	 * to the entrypoint address.
	 */
	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) {
		if (!test_bit(ae, &cfg_ae_mask))
			continue;

		if (!test_bit(ae, &ae_assigned))
			continue;

		aed = &obj_handle->ae_data[ae];
		/* find the slice to which this image is assigned */
		for (s = 0; s < aed->slice_num; s++) {
			if (image->ctx_assigned &
			    aed->ae_slices[s].ctx_mask_assigned)
				break;
		}
		if (s >= aed->slice_num)
			continue;
		page = aed->ae_slices[s].page;
		if (!page->encap_page->def_page)
			continue;
		qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae);

		for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
			aed->ae_slices[s].cur_page[ctx] =
				(ctx_mask & (1 << ctx)) ? page : NULL;
		qat_hal_set_live_ctx(handle, (unsigned char)ae,
				     image->ctx_assigned);
		qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned,
			       image->entry_address);
	}
}

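/*
 * Authenticate and load every SUOF image in the table: each image is
 * checked, mapped for authentication, authenticated by the FCU and then
 * loaded, either broadcast to all AEs or per AE.
 */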
static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle)
{
	unsigned int i;
	struct icp_qat_fw_auth_desc *desc = NULL;
	struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle;
	struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr;
	int ret;

	for (i = 0; i < sobj_handle->img_table.num_simgs; i++) {
		ret = qat_uclo_check_image(handle, simg_hdr[i].simg_buf,
					   simg_hdr[i].simg_len,
					   CSS_AE_FIRMWARE);
		if (ret)
			return ret;

		if (qat_uclo_map_auth_fw(handle,
					 (char *)simg_hdr[i].simg_buf,
					 (unsigned int)
					 simg_hdr[i].simg_len,
					 &desc))
			goto wr_err;
		if (qat_uclo_auth_fw(handle, desc))
			goto wr_err;
		if (qat_uclo_is_broadcast(handle, i)) {
			if (qat_uclo_broadcast_load_fw(handle, desc))
				goto wr_err;
		} else {
			if (qat_uclo_load_fw(handle, desc))
				goto wr_err;
		}
		qat_uclo_ummap_auth_fw(handle, &desc);
	}
	return 0;
wr_err:
	qat_uclo_ummap_auth_fw(handle, &desc);
	return -EINVAL;
}

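/*
 * Write all mapped UOF images: initialise global variables and each
 * image's ustore before writing the image pages to the assigned AEs.
 */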
static int qat_uclo_wr_uof_img(struct icp_qat_fw_loader_handle *handle)
{
	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
	unsigned int i;

	if (qat_uclo_init_globals(handle))
		return -EINVAL;
	for (i = 0; i < obj_handle->uimage_num; i++) {
		if (!obj_handle->ae_uimage[i].img_ptr)
			return -EINVAL;
		if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
			return -EINVAL;
		qat_uclo_wr_uimage_page(handle,
					obj_handle->ae_uimage[i].img_ptr);
	}
	return 0;
}

int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
{
	return (handle->chip_info->fw_auth) ? qat_uclo_wr_suof_img(handle) :
	       qat_uclo_wr_uof_img(handle);
}

int qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle,
			     unsigned int cfg_ae_mask)
{
	if (!cfg_ae_mask)
		return -EINVAL;

	handle->cfg_ae_mask = cfg_ae_mask;
	return 0;
}