1 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) 2 /* Copyright(c) 2014 - 2020 Intel Corporation */ 3 4 #define pr_fmt(fmt) "QAT: " fmt 5 6 #include <linux/align.h> 7 #include <linux/bitops.h> 8 #include <linux/slab.h> 9 #include <linux/ctype.h> 10 #include <linux/kernel.h> 11 #include <linux/delay.h> 12 #include <linux/pci_ids.h> 13 #include <linux/wordpart.h> 14 #include "adf_accel_devices.h" 15 #include "adf_common_drv.h" 16 #include "icp_qat_uclo.h" 17 #include "icp_qat_hal.h" 18 #include "icp_qat_fw_loader_handle.h" 19 20 #define UWORD_CPYBUF_SIZE 1024U 21 #define INVLD_UWORD 0xffffffffffull 22 #define PID_MINOR_REV 0xf 23 #define PID_MAJOR_REV (0xf << 4) 24 25 static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle, 26 unsigned int ae, unsigned int image_num) 27 { 28 struct icp_qat_uclo_aedata *ae_data; 29 struct icp_qat_uclo_encapme *encap_image; 30 struct icp_qat_uclo_page *page = NULL; 31 struct icp_qat_uclo_aeslice *ae_slice = NULL; 32 33 ae_data = &obj_handle->ae_data[ae]; 34 encap_image = &obj_handle->ae_uimage[image_num]; 35 ae_slice = &ae_data->ae_slices[ae_data->slice_num]; 36 ae_slice->encap_image = encap_image; 37 38 if (encap_image->img_ptr) { 39 ae_slice->ctx_mask_assigned = 40 encap_image->img_ptr->ctx_assigned; 41 ae_data->eff_ustore_size = obj_handle->ustore_phy_size; 42 } else { 43 ae_slice->ctx_mask_assigned = 0; 44 } 45 ae_slice->region = kzalloc(sizeof(*ae_slice->region), GFP_KERNEL); 46 if (!ae_slice->region) 47 return -ENOMEM; 48 ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL); 49 if (!ae_slice->page) 50 goto out_err; 51 page = ae_slice->page; 52 page->encap_page = encap_image->page; 53 ae_slice->page->region = ae_slice->region; 54 ae_data->slice_num++; 55 return 0; 56 out_err: 57 kfree(ae_slice->region); 58 ae_slice->region = NULL; 59 return -ENOMEM; 60 } 61 62 static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data) 63 { 64 unsigned int i; 65 66 if (!ae_data) { 67 pr_err("bad argument, ae_data is NULL\n"); 68 return -EINVAL; 69 } 70 71 for (i = 0; i < ae_data->slice_num; i++) { 72 kfree(ae_data->ae_slices[i].region); 73 ae_data->ae_slices[i].region = NULL; 74 kfree(ae_data->ae_slices[i].page); 75 ae_data->ae_slices[i].page = NULL; 76 } 77 return 0; 78 } 79 80 static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table, 81 unsigned int str_offset) 82 { 83 if (!str_table->table_len || str_offset > str_table->table_len) 84 return NULL; 85 return (char *)(((uintptr_t)(str_table->strings)) + str_offset); 86 } 87 88 static int qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr) 89 { 90 int maj = hdr->maj_ver & 0xff; 91 int min = hdr->min_ver & 0xff; 92 93 if (hdr->file_id != ICP_QAT_UOF_FID) { 94 pr_err("Invalid header 0x%x\n", hdr->file_id); 95 return -EINVAL; 96 } 97 if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) { 98 pr_err("bad UOF version, major 0x%x, minor 0x%x\n", maj, min); 99 return -EINVAL; 100 } 101 return 0; 102 } 103 104 static int qat_uclo_check_suof_format(struct icp_qat_suof_filehdr *suof_hdr) 105 { 106 int maj = suof_hdr->maj_ver & 0xff; 107 int min = suof_hdr->min_ver & 0xff; 108 109 if (suof_hdr->file_id != ICP_QAT_SUOF_FID) { 110 pr_err("invalid header 0x%x\n", suof_hdr->file_id); 111 return -EINVAL; 112 } 113 if (suof_hdr->fw_type != 0) { 114 pr_err("unsupported firmware type\n"); 115 return -EINVAL; 116 } 117 if (suof_hdr->num_chunks <= 0x1) { 118 pr_err("SUOF chunk amount is incorrect\n"); 119 return -EINVAL; 120 } 121 if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) { 122 pr_err("bad SUOF version, major 0x%x, minor 0x%x\n", maj, min); 123 return -EINVAL; 124 } 125 return 0; 126 } 127 128 static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle, 129 unsigned int addr, unsigned int *val, 130 unsigned int num_in_bytes) 131 { 132 unsigned int outval; 133 unsigned char *ptr = (unsigned char *)val; 134 135 while (num_in_bytes) { 136 memcpy(&outval, ptr, 4); 137 SRAM_WRITE(handle, addr, outval); 138 num_in_bytes -= 4; 139 ptr += 4; 140 addr += 4; 141 } 142 } 143 144 static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle, 145 unsigned char ae, unsigned int addr, 146 unsigned int *val, 147 unsigned int num_in_bytes) 148 { 149 unsigned int outval; 150 unsigned char *ptr = (unsigned char *)val; 151 152 addr >>= 0x2; /* convert to uword address */ 153 154 while (num_in_bytes) { 155 memcpy(&outval, ptr, 4); 156 qat_hal_wr_umem(handle, ae, addr++, 1, &outval); 157 num_in_bytes -= 4; 158 ptr += 4; 159 } 160 } 161 162 static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle, 163 unsigned char ae, 164 struct icp_qat_uof_batch_init 165 *umem_init_header) 166 { 167 struct icp_qat_uof_batch_init *umem_init; 168 169 if (!umem_init_header) 170 return; 171 umem_init = umem_init_header->next; 172 while (umem_init) { 173 unsigned int addr, *value, size; 174 175 ae = umem_init->ae; 176 addr = umem_init->addr; 177 value = umem_init->value; 178 size = umem_init->size; 179 qat_uclo_wr_umem_by_words(handle, ae, addr, value, size); 180 umem_init = umem_init->next; 181 } 182 } 183 184 static void 185 qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle, 186 struct icp_qat_uof_batch_init **base) 187 { 188 struct icp_qat_uof_batch_init *umem_init; 189 190 umem_init = *base; 191 while (umem_init) { 192 struct icp_qat_uof_batch_init *pre; 193 194 pre = umem_init; 195 umem_init = umem_init->next; 196 kfree(pre); 197 } 198 *base = NULL; 199 } 200 201 static int qat_uclo_parse_num(char *str, unsigned int *num) 202 { 203 char buf[16] = {0}; 204 unsigned long ae = 0; 205 int i; 206 207 strscpy(buf, str, sizeof(buf)); 208 for (i = 0; i < 16; i++) { 209 if (!isdigit(buf[i])) { 210 buf[i] = '\0'; 211 break; 212 } 213 } 214 if ((kstrtoul(buf, 10, &ae))) 215 return -EFAULT; 216 217 *num = (unsigned int)ae; 218 return 0; 219 } 220 221 static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle, 222 struct icp_qat_uof_initmem *init_mem, 223 unsigned int size_range, unsigned int *ae) 224 { 225 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 226 char *str; 227 228 if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) { 229 pr_err("initmem is out of range"); 230 return -EINVAL; 231 } 232 if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) { 233 pr_err("Memory scope for init_mem error\n"); 234 return -EINVAL; 235 } 236 str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name); 237 if (!str) { 238 pr_err("AE name assigned in UOF init table is NULL\n"); 239 return -EINVAL; 240 } 241 if (qat_uclo_parse_num(str, ae)) { 242 pr_err("Parse num for AE number failed\n"); 243 return -EINVAL; 244 } 245 if (*ae >= ICP_QAT_UCLO_MAX_AE) { 246 pr_err("ae %d out of range\n", *ae); 247 return -EINVAL; 248 } 249 return 0; 250 } 251 252 static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle 253 *handle, struct icp_qat_uof_initmem 254 *init_mem, unsigned int ae, 255 struct icp_qat_uof_batch_init 256 **init_tab_base) 257 { 258 struct icp_qat_uof_batch_init *init_header, *tail; 259 struct icp_qat_uof_batch_init *mem_init, *tail_old; 260 struct icp_qat_uof_memvar_attr *mem_val_attr; 261 unsigned int i, flag = 0; 262 263 mem_val_attr = 264 (struct icp_qat_uof_memvar_attr *)((uintptr_t)init_mem + 265 sizeof(struct icp_qat_uof_initmem)); 266 267 init_header = *init_tab_base; 268 if (!init_header) { 269 init_header = kzalloc(sizeof(*init_header), GFP_KERNEL); 270 if (!init_header) 271 return -ENOMEM; 272 init_header->size = 1; 273 *init_tab_base = init_header; 274 flag = 1; 275 } 276 tail_old = init_header; 277 while (tail_old->next) 278 tail_old = tail_old->next; 279 tail = tail_old; 280 for (i = 0; i < init_mem->val_attr_num; i++) { 281 mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL); 282 if (!mem_init) 283 goto out_err; 284 mem_init->ae = ae; 285 mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte; 286 mem_init->value = &mem_val_attr->value; 287 mem_init->size = 4; 288 mem_init->next = NULL; 289 tail->next = mem_init; 290 tail = mem_init; 291 init_header->size += qat_hal_get_ins_num(); 292 mem_val_attr++; 293 } 294 return 0; 295 out_err: 296 /* Do not free the list head unless we allocated it. */ 297 tail_old = tail_old->next; 298 if (flag) { 299 kfree(*init_tab_base); 300 *init_tab_base = NULL; 301 } 302 303 while (tail_old) { 304 mem_init = tail_old->next; 305 kfree(tail_old); 306 tail_old = mem_init; 307 } 308 return -ENOMEM; 309 } 310 311 static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle, 312 struct icp_qat_uof_initmem *init_mem) 313 { 314 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 315 unsigned int ae; 316 317 if (qat_uclo_fetch_initmem_ae(handle, init_mem, 318 handle->chip_info->lm_size, &ae)) 319 return -EINVAL; 320 if (qat_uclo_create_batch_init_list(handle, init_mem, ae, 321 &obj_handle->lm_init_tab[ae])) 322 return -EINVAL; 323 return 0; 324 } 325 326 static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle, 327 struct icp_qat_uof_initmem *init_mem) 328 { 329 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 330 unsigned int ae, ustore_size, uaddr, i; 331 struct icp_qat_uclo_aedata *aed; 332 333 ustore_size = obj_handle->ustore_phy_size; 334 if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae)) 335 return -EINVAL; 336 if (qat_uclo_create_batch_init_list(handle, init_mem, ae, 337 &obj_handle->umem_init_tab[ae])) 338 return -EINVAL; 339 /* set the highest ustore address referenced */ 340 uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2; 341 aed = &obj_handle->ae_data[ae]; 342 for (i = 0; i < aed->slice_num; i++) { 343 if (aed->ae_slices[i].encap_image->uwords_num < uaddr) 344 aed->ae_slices[i].encap_image->uwords_num = uaddr; 345 } 346 return 0; 347 } 348 349 static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle, 350 struct icp_qat_uof_initmem *init_mem) 351 { 352 switch (init_mem->region) { 353 case ICP_QAT_UOF_LMEM_REGION: 354 if (qat_uclo_init_lmem_seg(handle, init_mem)) 355 return -EINVAL; 356 break; 357 case ICP_QAT_UOF_UMEM_REGION: 358 if (qat_uclo_init_umem_seg(handle, init_mem)) 359 return -EINVAL; 360 break; 361 default: 362 pr_err("initmem region error. region type=0x%x\n", init_mem->region); 363 return -EINVAL; 364 } 365 return 0; 366 } 367 368 static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle, 369 struct icp_qat_uclo_encapme *image) 370 { 371 unsigned int i; 372 struct icp_qat_uclo_encap_page *page; 373 struct icp_qat_uof_image *uof_image; 374 unsigned char ae; 375 unsigned int ustore_size; 376 unsigned int patt_pos; 377 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 378 unsigned long ae_mask = handle->hal_handle->ae_mask; 379 unsigned long cfg_ae_mask = handle->cfg_ae_mask; 380 u64 *fill_data; 381 382 uof_image = image->img_ptr; 383 fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(u64), 384 GFP_KERNEL); 385 if (!fill_data) 386 return -ENOMEM; 387 for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++) 388 memcpy(&fill_data[i], &uof_image->fill_pattern, 389 sizeof(u64)); 390 page = image->page; 391 392 for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) { 393 unsigned long ae_assigned = uof_image->ae_assigned; 394 395 if (!test_bit(ae, &ae_assigned)) 396 continue; 397 398 if (!test_bit(ae, &cfg_ae_mask)) 399 continue; 400 401 ustore_size = obj_handle->ae_data[ae].eff_ustore_size; 402 patt_pos = page->beg_addr_p + page->micro_words_num; 403 404 qat_hal_wr_uwords(handle, (unsigned char)ae, 0, 405 page->beg_addr_p, &fill_data[0]); 406 qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos, 407 ustore_size - patt_pos + 1, 408 &fill_data[page->beg_addr_p]); 409 } 410 kfree(fill_data); 411 return 0; 412 } 413 414 static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle) 415 { 416 int i, ae; 417 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 418 struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem; 419 unsigned long ae_mask = handle->hal_handle->ae_mask; 420 421 for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) { 422 if (initmem->num_in_bytes) { 423 if (qat_uclo_init_ae_memory(handle, initmem)) 424 return -EINVAL; 425 } 426 initmem = (struct icp_qat_uof_initmem *)((uintptr_t)( 427 (uintptr_t)initmem + 428 sizeof(struct icp_qat_uof_initmem)) + 429 (sizeof(struct icp_qat_uof_memvar_attr) * 430 initmem->val_attr_num)); 431 } 432 433 for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) { 434 if (qat_hal_batch_wr_lm(handle, ae, 435 obj_handle->lm_init_tab[ae])) { 436 pr_err("fail to batch init lmem for AE %d\n", ae); 437 return -EINVAL; 438 } 439 qat_uclo_cleanup_batch_init_list(handle, 440 &obj_handle->lm_init_tab[ae]); 441 qat_uclo_batch_wr_umem(handle, ae, 442 obj_handle->umem_init_tab[ae]); 443 qat_uclo_cleanup_batch_init_list(handle, 444 &obj_handle-> 445 umem_init_tab[ae]); 446 } 447 return 0; 448 } 449 450 static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr, 451 char *chunk_id, void *cur) 452 { 453 int i; 454 struct icp_qat_uof_chunkhdr *chunk_hdr = 455 (struct icp_qat_uof_chunkhdr *) 456 ((uintptr_t)obj_hdr + sizeof(struct icp_qat_uof_objhdr)); 457 458 for (i = 0; i < obj_hdr->num_chunks; i++) { 459 if ((cur < (void *)&chunk_hdr[i]) && 460 !strncmp(chunk_hdr[i].chunk_id, chunk_id, 461 ICP_QAT_UOF_OBJID_LEN)) { 462 return &chunk_hdr[i]; 463 } 464 } 465 return NULL; 466 } 467 468 static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch) 469 { 470 int i; 471 unsigned int topbit = 1 << 0xF; 472 unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch); 473 474 reg ^= inbyte << 0x8; 475 for (i = 0; i < 0x8; i++) { 476 if (reg & topbit) 477 reg = (reg << 1) ^ 0x1021; 478 else 479 reg <<= 1; 480 } 481 return reg & 0xFFFF; 482 } 483 484 static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num) 485 { 486 unsigned int chksum = 0; 487 488 if (ptr) 489 while (num--) 490 chksum = qat_uclo_calc_checksum(chksum, *ptr++); 491 return chksum; 492 } 493 494 static struct icp_qat_uclo_objhdr * 495 qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr, 496 char *chunk_id) 497 { 498 struct icp_qat_uof_filechunkhdr *file_chunk; 499 struct icp_qat_uclo_objhdr *obj_hdr; 500 char *chunk; 501 int i; 502 503 file_chunk = (struct icp_qat_uof_filechunkhdr *) 504 (buf + sizeof(struct icp_qat_uof_filehdr)); 505 for (i = 0; i < file_hdr->num_chunks; i++) { 506 if (!strncmp(file_chunk->chunk_id, chunk_id, 507 ICP_QAT_UOF_OBJID_LEN)) { 508 chunk = buf + file_chunk->offset; 509 if (file_chunk->checksum != qat_uclo_calc_str_checksum( 510 chunk, file_chunk->size)) 511 break; 512 obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL); 513 if (!obj_hdr) 514 break; 515 obj_hdr->file_buff = chunk; 516 obj_hdr->checksum = file_chunk->checksum; 517 obj_hdr->size = file_chunk->size; 518 return obj_hdr; 519 } 520 file_chunk++; 521 } 522 return NULL; 523 } 524 525 static int 526 qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj, 527 struct icp_qat_uof_image *image) 528 { 529 struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab; 530 struct icp_qat_uof_objtable *neigh_reg_tab; 531 struct icp_qat_uof_code_page *code_page; 532 533 code_page = (struct icp_qat_uof_code_page *) 534 ((char *)image + sizeof(struct icp_qat_uof_image)); 535 uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof + 536 code_page->uc_var_tab_offset); 537 imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof + 538 code_page->imp_var_tab_offset); 539 imp_expr_tab = (struct icp_qat_uof_objtable *) 540 (encap_uof_obj->beg_uof + 541 code_page->imp_expr_tab_offset); 542 if (uc_var_tab->entry_num || imp_var_tab->entry_num || 543 imp_expr_tab->entry_num) { 544 pr_err("UOF can't contain imported variable to be parsed\n"); 545 return -EINVAL; 546 } 547 neigh_reg_tab = (struct icp_qat_uof_objtable *) 548 (encap_uof_obj->beg_uof + 549 code_page->neigh_reg_tab_offset); 550 if (neigh_reg_tab->entry_num) { 551 pr_err("UOF can't contain neighbor register table\n"); 552 return -EINVAL; 553 } 554 if (image->numpages > 1) { 555 pr_err("UOF can't contain multiple pages\n"); 556 return -EINVAL; 557 } 558 if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) { 559 pr_err("UOF can't use shared control store feature\n"); 560 return -EFAULT; 561 } 562 if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) { 563 pr_err("UOF can't use reloadable feature\n"); 564 return -EFAULT; 565 } 566 return 0; 567 } 568 569 static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj 570 *encap_uof_obj, 571 struct icp_qat_uof_image *img, 572 struct icp_qat_uclo_encap_page *page) 573 { 574 struct icp_qat_uof_code_page *code_page; 575 struct icp_qat_uof_code_area *code_area; 576 struct icp_qat_uof_objtable *uword_block_tab; 577 struct icp_qat_uof_uword_block *uwblock; 578 int i; 579 580 code_page = (struct icp_qat_uof_code_page *) 581 ((char *)img + sizeof(struct icp_qat_uof_image)); 582 page->def_page = code_page->def_page; 583 page->page_region = code_page->page_region; 584 page->beg_addr_v = code_page->beg_addr_v; 585 page->beg_addr_p = code_page->beg_addr_p; 586 code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof + 587 code_page->code_area_offset); 588 page->micro_words_num = code_area->micro_words_num; 589 uword_block_tab = (struct icp_qat_uof_objtable *) 590 (encap_uof_obj->beg_uof + 591 code_area->uword_block_tab); 592 page->uwblock_num = uword_block_tab->entry_num; 593 uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab + 594 sizeof(struct icp_qat_uof_objtable)); 595 page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock; 596 for (i = 0; i < uword_block_tab->entry_num; i++) 597 page->uwblock[i].micro_words = 598 (uintptr_t)encap_uof_obj->beg_uof + uwblock[i].uword_offset; 599 } 600 601 static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle, 602 struct icp_qat_uclo_encapme *ae_uimage, 603 int max_image) 604 { 605 int i, j; 606 struct icp_qat_uof_chunkhdr *chunk_hdr = NULL; 607 struct icp_qat_uof_image *image; 608 struct icp_qat_uof_objtable *ae_regtab; 609 struct icp_qat_uof_objtable *init_reg_sym_tab; 610 struct icp_qat_uof_objtable *sbreak_tab; 611 struct icp_qat_uof_encap_obj *encap_uof_obj = 612 &obj_handle->encap_uof_obj; 613 614 for (j = 0; j < max_image; j++) { 615 chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr, 616 ICP_QAT_UOF_IMAG, chunk_hdr); 617 if (!chunk_hdr) 618 break; 619 image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof + 620 chunk_hdr->offset); 621 ae_regtab = (struct icp_qat_uof_objtable *) 622 (image->reg_tab_offset + 623 obj_handle->obj_hdr->file_buff); 624 ae_uimage[j].ae_reg_num = ae_regtab->entry_num; 625 ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *) 626 (((char *)ae_regtab) + 627 sizeof(struct icp_qat_uof_objtable)); 628 init_reg_sym_tab = (struct icp_qat_uof_objtable *) 629 (image->init_reg_sym_tab + 630 obj_handle->obj_hdr->file_buff); 631 ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num; 632 ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *) 633 (((char *)init_reg_sym_tab) + 634 sizeof(struct icp_qat_uof_objtable)); 635 sbreak_tab = (struct icp_qat_uof_objtable *) 636 (image->sbreak_tab + obj_handle->obj_hdr->file_buff); 637 ae_uimage[j].sbreak_num = sbreak_tab->entry_num; 638 ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *) 639 (((char *)sbreak_tab) + 640 sizeof(struct icp_qat_uof_objtable)); 641 ae_uimage[j].img_ptr = image; 642 if (qat_uclo_check_image_compat(encap_uof_obj, image)) 643 goto out_err; 644 ae_uimage[j].page = 645 kzalloc(sizeof(struct icp_qat_uclo_encap_page), 646 GFP_KERNEL); 647 if (!ae_uimage[j].page) 648 goto out_err; 649 qat_uclo_map_image_page(encap_uof_obj, image, 650 ae_uimage[j].page); 651 } 652 return j; 653 out_err: 654 for (i = 0; i < j; i++) 655 kfree(ae_uimage[i].page); 656 return 0; 657 } 658 659 static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae) 660 { 661 int i, ae; 662 int mflag = 0; 663 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 664 unsigned long ae_mask = handle->hal_handle->ae_mask; 665 unsigned long cfg_ae_mask = handle->cfg_ae_mask; 666 667 for_each_set_bit(ae, &ae_mask, max_ae) { 668 if (!test_bit(ae, &cfg_ae_mask)) 669 continue; 670 671 for (i = 0; i < obj_handle->uimage_num; i++) { 672 unsigned long ae_assigned = obj_handle->ae_uimage[i].img_ptr->ae_assigned; 673 674 if (!test_bit(ae, &ae_assigned)) 675 continue; 676 mflag = 1; 677 if (qat_uclo_init_ae_data(obj_handle, ae, i)) 678 return -EINVAL; 679 } 680 } 681 if (!mflag) { 682 pr_err("uimage uses AE not set\n"); 683 return -EINVAL; 684 } 685 return 0; 686 } 687 688 static struct icp_qat_uof_strtable * 689 qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr, 690 char *tab_name, struct icp_qat_uof_strtable *str_table) 691 { 692 struct icp_qat_uof_chunkhdr *chunk_hdr; 693 694 chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *) 695 obj_hdr->file_buff, tab_name, NULL); 696 if (chunk_hdr) { 697 int hdr_size; 698 699 memcpy(&str_table->table_len, obj_hdr->file_buff + 700 chunk_hdr->offset, sizeof(str_table->table_len)); 701 hdr_size = (char *)&str_table->strings - (char *)str_table; 702 str_table->strings = (uintptr_t)obj_hdr->file_buff + 703 chunk_hdr->offset + hdr_size; 704 return str_table; 705 } 706 return NULL; 707 } 708 709 static void 710 qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj, 711 struct icp_qat_uclo_init_mem_table *init_mem_tab) 712 { 713 struct icp_qat_uof_chunkhdr *chunk_hdr; 714 715 chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr, 716 ICP_QAT_UOF_IMEM, NULL); 717 if (chunk_hdr) { 718 memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof + 719 chunk_hdr->offset, sizeof(unsigned int)); 720 init_mem_tab->init_mem = (struct icp_qat_uof_initmem *) 721 (encap_uof_obj->beg_uof + chunk_hdr->offset + 722 sizeof(unsigned int)); 723 } 724 } 725 726 static unsigned int 727 qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle) 728 { 729 switch (handle->pci_dev->device) { 730 case PCI_DEVICE_ID_INTEL_QAT_DH895XCC: 731 return ICP_QAT_AC_895XCC_DEV_TYPE; 732 case PCI_DEVICE_ID_INTEL_QAT_C62X: 733 return ICP_QAT_AC_C62X_DEV_TYPE; 734 case PCI_DEVICE_ID_INTEL_QAT_C3XXX: 735 return ICP_QAT_AC_C3XXX_DEV_TYPE; 736 case PCI_DEVICE_ID_INTEL_QAT_4XXX: 737 case PCI_DEVICE_ID_INTEL_QAT_401XX: 738 case PCI_DEVICE_ID_INTEL_QAT_402XX: 739 case PCI_DEVICE_ID_INTEL_QAT_420XX: 740 return ICP_QAT_AC_4XXX_A_DEV_TYPE; 741 case PCI_DEVICE_ID_INTEL_QAT_6XXX: 742 return ICP_QAT_AC_6XXX_DEV_TYPE; 743 default: 744 pr_err("unsupported device 0x%x\n", handle->pci_dev->device); 745 return 0; 746 } 747 } 748 749 static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle) 750 { 751 unsigned int maj_ver, prod_type = obj_handle->prod_type; 752 753 if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) { 754 pr_err("UOF type 0x%x doesn't match with platform 0x%x\n", 755 obj_handle->encap_uof_obj.obj_hdr->ac_dev_type, 756 prod_type); 757 return -EINVAL; 758 } 759 maj_ver = obj_handle->prod_rev & 0xff; 760 if (obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver || 761 obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver) { 762 pr_err("UOF majVer 0x%x out of range\n", maj_ver); 763 return -EINVAL; 764 } 765 return 0; 766 } 767 768 static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle, 769 unsigned char ae, unsigned char ctx_mask, 770 enum icp_qat_uof_regtype reg_type, 771 unsigned short reg_addr, unsigned int value) 772 { 773 switch (reg_type) { 774 case ICP_GPA_ABS: 775 case ICP_GPB_ABS: 776 ctx_mask = 0; 777 fallthrough; 778 case ICP_GPA_REL: 779 case ICP_GPB_REL: 780 return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type, 781 reg_addr, value); 782 case ICP_SR_ABS: 783 case ICP_DR_ABS: 784 case ICP_SR_RD_ABS: 785 case ICP_DR_RD_ABS: 786 ctx_mask = 0; 787 fallthrough; 788 case ICP_SR_REL: 789 case ICP_DR_REL: 790 case ICP_SR_RD_REL: 791 case ICP_DR_RD_REL: 792 return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type, 793 reg_addr, value); 794 case ICP_SR_WR_ABS: 795 case ICP_DR_WR_ABS: 796 ctx_mask = 0; 797 fallthrough; 798 case ICP_SR_WR_REL: 799 case ICP_DR_WR_REL: 800 return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type, 801 reg_addr, value); 802 case ICP_NEIGH_REL: 803 return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value); 804 default: 805 pr_err("UOF uses not supported reg type 0x%x\n", reg_type); 806 return -EFAULT; 807 } 808 return 0; 809 } 810 811 static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle, 812 unsigned int ae, 813 struct icp_qat_uclo_encapme *encap_ae) 814 { 815 unsigned int i; 816 unsigned char ctx_mask; 817 struct icp_qat_uof_init_regsym *init_regsym; 818 819 if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) == 820 ICP_QAT_UCLO_MAX_CTX) 821 ctx_mask = 0xff; 822 else 823 ctx_mask = 0x55; 824 825 for (i = 0; i < encap_ae->init_regsym_num; i++) { 826 unsigned int exp_res; 827 828 init_regsym = &encap_ae->init_regsym[i]; 829 exp_res = init_regsym->value; 830 switch (init_regsym->init_type) { 831 case ICP_QAT_UOF_INIT_REG: 832 qat_uclo_init_reg(handle, ae, ctx_mask, 833 (enum icp_qat_uof_regtype) 834 init_regsym->reg_type, 835 (unsigned short)init_regsym->reg_addr, 836 exp_res); 837 break; 838 case ICP_QAT_UOF_INIT_REG_CTX: 839 /* check if ctx is appropriate for the ctxMode */ 840 if (!((1 << init_regsym->ctx) & ctx_mask)) { 841 pr_err("invalid ctx num = 0x%x\n", init_regsym->ctx); 842 return -EINVAL; 843 } 844 qat_uclo_init_reg(handle, ae, 845 (unsigned char) 846 (1 << init_regsym->ctx), 847 (enum icp_qat_uof_regtype) 848 init_regsym->reg_type, 849 (unsigned short)init_regsym->reg_addr, 850 exp_res); 851 break; 852 case ICP_QAT_UOF_INIT_EXPR: 853 pr_err("INIT_EXPR feature not supported\n"); 854 return -EINVAL; 855 case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP: 856 pr_err("INIT_EXPR_ENDIAN_SWAP feature not supported\n"); 857 return -EINVAL; 858 default: 859 break; 860 } 861 } 862 return 0; 863 } 864 865 static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle) 866 { 867 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 868 unsigned long ae_mask = handle->hal_handle->ae_mask; 869 struct icp_qat_uclo_aedata *aed; 870 unsigned int s, ae; 871 872 if (obj_handle->global_inited) 873 return 0; 874 if (obj_handle->init_mem_tab.entry_num) { 875 if (qat_uclo_init_memory(handle)) { 876 pr_err("initialize memory failed\n"); 877 return -EINVAL; 878 } 879 } 880 881 for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) { 882 aed = &obj_handle->ae_data[ae]; 883 for (s = 0; s < aed->slice_num; s++) { 884 if (!aed->ae_slices[s].encap_image) 885 continue; 886 if (qat_uclo_init_reg_sym(handle, ae, aed->ae_slices[s].encap_image)) 887 return -EINVAL; 888 } 889 } 890 obj_handle->global_inited = 1; 891 return 0; 892 } 893 894 static int qat_hal_set_modes(struct icp_qat_fw_loader_handle *handle, 895 struct icp_qat_uclo_objhandle *obj_handle, 896 unsigned char ae, 897 struct icp_qat_uof_image *uof_image) 898 { 899 unsigned char mode; 900 int ret; 901 902 mode = ICP_QAT_CTX_MODE(uof_image->ae_mode); 903 ret = qat_hal_set_ae_ctx_mode(handle, ae, mode); 904 if (ret) { 905 pr_err("qat_hal_set_ae_ctx_mode error\n"); 906 return ret; 907 } 908 if (handle->chip_info->nn) { 909 mode = ICP_QAT_NN_MODE(uof_image->ae_mode); 910 ret = qat_hal_set_ae_nn_mode(handle, ae, mode); 911 if (ret) { 912 pr_err("qat_hal_set_ae_nn_mode error\n"); 913 return ret; 914 } 915 } 916 mode = ICP_QAT_LOC_MEM0_MODE(uof_image->ae_mode); 917 ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0, mode); 918 if (ret) { 919 pr_err("qat_hal_set_ae_lm_mode LMEM0 error\n"); 920 return ret; 921 } 922 mode = ICP_QAT_LOC_MEM1_MODE(uof_image->ae_mode); 923 ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1, mode); 924 if (ret) { 925 pr_err("qat_hal_set_ae_lm_mode LMEM1 error\n"); 926 return ret; 927 } 928 if (handle->chip_info->lm2lm3) { 929 mode = ICP_QAT_LOC_MEM2_MODE(uof_image->ae_mode); 930 ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM2, mode); 931 if (ret) { 932 pr_err("qat_hal_set_ae_lm_mode LMEM2 error\n"); 933 return ret; 934 } 935 mode = ICP_QAT_LOC_MEM3_MODE(uof_image->ae_mode); 936 ret = qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM3, mode); 937 if (ret) { 938 pr_err("qat_hal_set_ae_lm_mode LMEM3 error\n"); 939 return ret; 940 } 941 mode = ICP_QAT_LOC_TINDEX_MODE(uof_image->ae_mode); 942 qat_hal_set_ae_tindex_mode(handle, ae, mode); 943 } 944 return 0; 945 } 946 947 static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle) 948 { 949 struct icp_qat_uof_image *uof_image; 950 struct icp_qat_uclo_aedata *ae_data; 951 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 952 unsigned long ae_mask = handle->hal_handle->ae_mask; 953 unsigned long cfg_ae_mask = handle->cfg_ae_mask; 954 unsigned char ae, s; 955 int error; 956 957 for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) { 958 if (!test_bit(ae, &cfg_ae_mask)) 959 continue; 960 961 ae_data = &obj_handle->ae_data[ae]; 962 for (s = 0; s < min_t(unsigned int, ae_data->slice_num, 963 ICP_QAT_UCLO_MAX_CTX); s++) { 964 if (!obj_handle->ae_data[ae].ae_slices[s].encap_image) 965 continue; 966 uof_image = ae_data->ae_slices[s].encap_image->img_ptr; 967 error = qat_hal_set_modes(handle, obj_handle, ae, 968 uof_image); 969 if (error) 970 return error; 971 } 972 } 973 return 0; 974 } 975 976 static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle) 977 { 978 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 979 struct icp_qat_uclo_encapme *image; 980 int a; 981 982 for (a = 0; a < obj_handle->uimage_num; a++) { 983 image = &obj_handle->ae_uimage[a]; 984 image->uwords_num = image->page->beg_addr_p + 985 image->page->micro_words_num; 986 } 987 } 988 989 static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle) 990 { 991 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 992 unsigned int ae; 993 994 obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff; 995 obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *) 996 obj_handle->obj_hdr->file_buff; 997 obj_handle->uword_in_bytes = 6; 998 obj_handle->prod_type = qat_uclo_get_dev_type(handle); 999 obj_handle->prod_rev = PID_MAJOR_REV | 1000 (PID_MINOR_REV & handle->hal_handle->revision_id); 1001 if (qat_uclo_check_uof_compat(obj_handle)) { 1002 pr_err("UOF incompatible\n"); 1003 return -EINVAL; 1004 } 1005 obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(u64), 1006 GFP_KERNEL); 1007 if (!obj_handle->uword_buf) 1008 return -ENOMEM; 1009 obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE; 1010 if (!obj_handle->obj_hdr->file_buff || 1011 !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT, 1012 &obj_handle->str_table)) { 1013 pr_err("UOF doesn't have effective images\n"); 1014 goto out_err; 1015 } 1016 obj_handle->uimage_num = 1017 qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage, 1018 ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX); 1019 if (!obj_handle->uimage_num) 1020 goto out_err; 1021 if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) { 1022 pr_err("Bad object\n"); 1023 goto out_check_uof_aemask_err; 1024 } 1025 qat_uclo_init_uword_num(handle); 1026 qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj, 1027 &obj_handle->init_mem_tab); 1028 if (qat_uclo_set_ae_mode(handle)) 1029 goto out_check_uof_aemask_err; 1030 return 0; 1031 out_check_uof_aemask_err: 1032 for (ae = 0; ae < obj_handle->uimage_num; ae++) 1033 kfree(obj_handle->ae_uimage[ae].page); 1034 out_err: 1035 kfree(obj_handle->uword_buf); 1036 return -EFAULT; 1037 } 1038 1039 static unsigned int qat_uclo_simg_hdr2sign_len(struct icp_qat_fw_loader_handle *handle) 1040 { 1041 if (handle->chip_info->dual_sign) 1042 return ICP_QAT_DUALSIGN_OPAQUE_DATA_LEN; 1043 1044 return ICP_QAT_AE_IMG_OFFSET(handle); 1045 } 1046 1047 static unsigned int qat_uclo_simg_hdr2cont_len(struct icp_qat_fw_loader_handle *handle) 1048 { 1049 if (handle->chip_info->dual_sign) 1050 return ICP_QAT_DUALSIGN_OPAQUE_DATA_LEN + ICP_QAT_DUALSIGN_MISC_INFO_LEN; 1051 1052 return ICP_QAT_AE_IMG_OFFSET(handle); 1053 } 1054 1055 static unsigned int qat_uclo_simg_fw_type(struct icp_qat_fw_loader_handle *handle, void *img_ptr) 1056 { 1057 struct icp_qat_css_hdr *hdr = img_ptr; 1058 char *fw_hdr = img_ptr; 1059 unsigned int offset; 1060 1061 if (handle->chip_info->dual_sign) { 1062 offset = qat_uclo_simg_hdr2sign_len(handle) + ICP_QAT_DUALSIGN_FW_TYPE_LEN; 1063 return *(fw_hdr + offset); 1064 } 1065 1066 return hdr->fw_type; 1067 } 1068 1069 static int qat_uclo_map_suof_file_hdr(struct icp_qat_fw_loader_handle *handle, 1070 struct icp_qat_suof_filehdr *suof_ptr, 1071 int suof_size) 1072 { 1073 unsigned int check_sum = 0; 1074 unsigned int min_ver_offset = 0; 1075 struct icp_qat_suof_handle *suof_handle = handle->sobj_handle; 1076 1077 suof_handle->file_id = ICP_QAT_SUOF_FID; 1078 suof_handle->suof_buf = (char *)suof_ptr; 1079 suof_handle->suof_size = suof_size; 1080 min_ver_offset = suof_size - offsetof(struct icp_qat_suof_filehdr, 1081 min_ver); 1082 check_sum = qat_uclo_calc_str_checksum((char *)&suof_ptr->min_ver, 1083 min_ver_offset); 1084 if (check_sum != suof_ptr->check_sum) { 1085 pr_err("incorrect SUOF checksum\n"); 1086 return -EINVAL; 1087 } 1088 suof_handle->check_sum = suof_ptr->check_sum; 1089 suof_handle->min_ver = suof_ptr->min_ver; 1090 suof_handle->maj_ver = suof_ptr->maj_ver; 1091 suof_handle->fw_type = suof_ptr->fw_type; 1092 return 0; 1093 } 1094 1095 static void qat_uclo_map_simg(struct icp_qat_fw_loader_handle *handle, 1096 struct icp_qat_suof_img_hdr *suof_img_hdr, 1097 struct icp_qat_suof_chunk_hdr *suof_chunk_hdr) 1098 { 1099 struct icp_qat_suof_handle *suof_handle = handle->sobj_handle; 1100 unsigned int offset = qat_uclo_simg_hdr2cont_len(handle); 1101 struct icp_qat_suof_objhdr *suof_objhdr; 1102 struct icp_qat_simg_ae_mode *ae_mode; 1103 1104 suof_img_hdr->simg_buf = (suof_handle->suof_buf + 1105 suof_chunk_hdr->offset + 1106 sizeof(*suof_objhdr)); 1107 suof_img_hdr->simg_len = ((struct icp_qat_suof_objhdr *)(uintptr_t) 1108 (suof_handle->suof_buf + 1109 suof_chunk_hdr->offset))->img_length; 1110 1111 suof_img_hdr->css_header = suof_img_hdr->simg_buf; 1112 suof_img_hdr->css_simg = suof_img_hdr->css_header + offset; 1113 1114 ae_mode = (struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg); 1115 suof_img_hdr->ae_mask = ae_mode->ae_mask; 1116 suof_img_hdr->simg_name = (unsigned long)&ae_mode->simg_name; 1117 suof_img_hdr->appmeta_data = (unsigned long)&ae_mode->appmeta_data; 1118 suof_img_hdr->fw_type = ae_mode->fw_type; 1119 } 1120 1121 static void 1122 qat_uclo_map_suof_symobjs(struct icp_qat_suof_handle *suof_handle, 1123 struct icp_qat_suof_chunk_hdr *suof_chunk_hdr) 1124 { 1125 char **sym_str = (char **)&suof_handle->sym_str; 1126 unsigned int *sym_size = &suof_handle->sym_size; 1127 struct icp_qat_suof_strtable *str_table_obj; 1128 1129 *sym_size = *(unsigned int *)(uintptr_t) 1130 (suof_chunk_hdr->offset + suof_handle->suof_buf); 1131 *sym_str = (char *)(uintptr_t) 1132 (suof_handle->suof_buf + suof_chunk_hdr->offset + 1133 sizeof(str_table_obj->tab_length)); 1134 } 1135 1136 static int qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle, 1137 struct icp_qat_suof_img_hdr *img_hdr) 1138 { 1139 struct icp_qat_simg_ae_mode *img_ae_mode = NULL; 1140 unsigned int prod_rev, maj_ver, prod_type; 1141 1142 prod_type = qat_uclo_get_dev_type(handle); 1143 img_ae_mode = (struct icp_qat_simg_ae_mode *)img_hdr->css_simg; 1144 prod_rev = PID_MAJOR_REV | 1145 (PID_MINOR_REV & handle->hal_handle->revision_id); 1146 if (img_ae_mode->dev_type != prod_type) { 1147 pr_err("incompatible product type %x\n", img_ae_mode->dev_type); 1148 return -EINVAL; 1149 } 1150 maj_ver = prod_rev & 0xff; 1151 if (maj_ver > img_ae_mode->devmax_ver || 1152 maj_ver < img_ae_mode->devmin_ver) { 1153 pr_err("incompatible device majver 0x%x\n", maj_ver); 1154 return -EINVAL; 1155 } 1156 return 0; 1157 } 1158 1159 static void qat_uclo_del_suof(struct icp_qat_fw_loader_handle *handle) 1160 { 1161 struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle; 1162 1163 kfree(sobj_handle->img_table.simg_hdr); 1164 sobj_handle->img_table.simg_hdr = NULL; 1165 kfree(handle->sobj_handle); 1166 handle->sobj_handle = NULL; 1167 } 1168 1169 static void qat_uclo_tail_img(struct icp_qat_suof_img_hdr *suof_img_hdr, 1170 unsigned int img_id, unsigned int num_simgs) 1171 { 1172 struct icp_qat_suof_img_hdr img_header; 1173 1174 if (img_id != num_simgs - 1) { 1175 memcpy(&img_header, &suof_img_hdr[num_simgs - 1], 1176 sizeof(*suof_img_hdr)); 1177 memcpy(&suof_img_hdr[num_simgs - 1], &suof_img_hdr[img_id], 1178 sizeof(*suof_img_hdr)); 1179 memcpy(&suof_img_hdr[img_id], &img_header, 1180 sizeof(*suof_img_hdr)); 1181 } 1182 } 1183 1184 static int qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle, 1185 struct icp_qat_suof_filehdr *suof_ptr, 1186 int suof_size) 1187 { 1188 struct icp_qat_suof_handle *suof_handle = handle->sobj_handle; 1189 struct icp_qat_suof_chunk_hdr *suof_chunk_hdr = NULL; 1190 struct icp_qat_suof_img_hdr *suof_img_hdr = NULL; 1191 int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE; 1192 unsigned int i = 0; 1193 struct icp_qat_suof_img_hdr img_header; 1194 1195 if (!suof_ptr || suof_size == 0) { 1196 pr_err("input parameter SUOF pointer/size is NULL\n"); 1197 return -EINVAL; 1198 } 1199 if (qat_uclo_check_suof_format(suof_ptr)) 1200 return -EINVAL; 1201 ret = qat_uclo_map_suof_file_hdr(handle, suof_ptr, suof_size); 1202 if (ret) 1203 return ret; 1204 suof_chunk_hdr = (struct icp_qat_suof_chunk_hdr *) 1205 ((uintptr_t)suof_ptr + sizeof(*suof_ptr)); 1206 1207 qat_uclo_map_suof_symobjs(suof_handle, suof_chunk_hdr); 1208 suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1; 1209 1210 if (suof_handle->img_table.num_simgs != 0) { 1211 suof_img_hdr = kcalloc(suof_handle->img_table.num_simgs, 1212 sizeof(img_header), 1213 GFP_KERNEL); 1214 if (!suof_img_hdr) 1215 return -ENOMEM; 1216 suof_handle->img_table.simg_hdr = suof_img_hdr; 1217 1218 for (i = 0; i < suof_handle->img_table.num_simgs; i++) { 1219 qat_uclo_map_simg(handle, &suof_img_hdr[i], 1220 &suof_chunk_hdr[1 + i]); 1221 ret = qat_uclo_check_simg_compat(handle, 1222 &suof_img_hdr[i]); 1223 if (ret) 1224 return ret; 1225 suof_img_hdr[i].ae_mask &= handle->cfg_ae_mask; 1226 if ((suof_img_hdr[i].ae_mask & 0x1) != 0) 1227 ae0_img = i; 1228 } 1229 1230 if (!handle->chip_info->tgroup_share_ustore) { 1231 qat_uclo_tail_img(suof_img_hdr, ae0_img, 1232 suof_handle->img_table.num_simgs); 1233 } 1234 } 1235 return 0; 1236 } 1237 1238 #define ADD_ADDR(high, low) ((((u64)high) << 32) + low) 1239 1240 static int qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle, 1241 struct icp_qat_fw_auth_desc *desc) 1242 { 1243 u32 fcu_sts, retry = 0; 1244 u32 fcu_ctl_csr, fcu_sts_csr; 1245 u32 fcu_dram_hi_csr, fcu_dram_lo_csr; 1246 u64 bus_addr; 1247 1248 bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low) 1249 - sizeof(struct icp_qat_auth_chunk); 1250 1251 fcu_ctl_csr = handle->chip_info->fcu_ctl_csr; 1252 fcu_sts_csr = handle->chip_info->fcu_sts_csr; 1253 fcu_dram_hi_csr = handle->chip_info->fcu_dram_addr_hi; 1254 fcu_dram_lo_csr = handle->chip_info->fcu_dram_addr_lo; 1255 1256 SET_CAP_CSR(handle, fcu_dram_hi_csr, bus_addr >> BITS_PER_TYPE(u32)); 1257 SET_CAP_CSR(handle, fcu_dram_lo_csr, bus_addr); 1258 SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_AUTH); 1259 1260 do { 1261 msleep(FW_AUTH_WAIT_PERIOD); 1262 fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr); 1263 if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL) 1264 goto auth_fail; 1265 if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1)) 1266 if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE) 1267 return 0; 1268 } while (retry++ < FW_AUTH_MAX_RETRY); 1269 auth_fail: 1270 pr_err("authentication error (FCU_STATUS = 0x%x),retry = %d\n", 1271 fcu_sts & FCU_AUTH_STS_MASK, retry); 1272 return -EINVAL; 1273 } 1274 1275 static bool qat_uclo_is_broadcast(struct icp_qat_fw_loader_handle *handle, 1276 int imgid) 1277 { 1278 struct icp_qat_suof_handle *sobj_handle; 1279 1280 if (!handle->chip_info->tgroup_share_ustore) 1281 return false; 1282 1283 sobj_handle = (struct icp_qat_suof_handle *)handle->sobj_handle; 1284 if (handle->hal_handle->admin_ae_mask & 1285 sobj_handle->img_table.simg_hdr[imgid].ae_mask) 1286 return false; 1287 1288 return true; 1289 } 1290 1291 static int qat_uclo_broadcast_load_fw(struct icp_qat_fw_loader_handle *handle, 1292 struct icp_qat_fw_auth_desc *desc) 1293 { 1294 unsigned long ae_mask = handle->hal_handle->ae_mask; 1295 unsigned long desc_ae_mask = desc->ae_mask; 1296 u32 fcu_sts, ae_broadcast_mask = 0; 1297 u32 fcu_loaded_csr, ae_loaded; 1298 u32 fcu_sts_csr, fcu_ctl_csr; 1299 unsigned int ae, retry = 0; 1300 1301 if (handle->chip_info->tgroup_share_ustore) { 1302 fcu_ctl_csr = handle->chip_info->fcu_ctl_csr; 1303 fcu_sts_csr = handle->chip_info->fcu_sts_csr; 1304 fcu_loaded_csr = handle->chip_info->fcu_loaded_ae_csr; 1305 } else { 1306 pr_err("Chip 0x%x doesn't support broadcast load\n", handle->pci_dev->device); 1307 return -EINVAL; 1308 } 1309 1310 for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) { 1311 if (qat_hal_check_ae_active(handle, (unsigned char)ae)) { 1312 pr_err("Broadcast load failed. AE is not enabled or active.\n"); 1313 return -EINVAL; 1314 } 1315 1316 if (test_bit(ae, &desc_ae_mask)) 1317 ae_broadcast_mask |= 1 << ae; 1318 } 1319 1320 if (ae_broadcast_mask) { 1321 SET_CAP_CSR(handle, FCU_ME_BROADCAST_MASK_TYPE, 1322 ae_broadcast_mask); 1323 1324 SET_CAP_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_LOAD); 1325 1326 do { 1327 msleep(FW_AUTH_WAIT_PERIOD); 1328 fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr); 1329 fcu_sts &= FCU_AUTH_STS_MASK; 1330 1331 if (fcu_sts == FCU_STS_LOAD_FAIL) { 1332 pr_err("Broadcast load failed: 0x%x)\n", fcu_sts); 1333 return -EINVAL; 1334 } else if (fcu_sts == FCU_STS_LOAD_DONE) { 1335 ae_loaded = GET_CAP_CSR(handle, fcu_loaded_csr); 1336 ae_loaded >>= handle->chip_info->fcu_loaded_ae_pos; 1337 1338 if ((ae_loaded & ae_broadcast_mask) == ae_broadcast_mask) 1339 break; 1340 } 1341 } while (retry++ < FW_AUTH_MAX_RETRY); 1342 1343 if (retry > FW_AUTH_MAX_RETRY) { 1344 pr_err("broadcast load failed timeout %d\n", retry); 1345 return -EINVAL; 1346 } 1347 } 1348 return 0; 1349 } 1350 1351 static int qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle *handle, 1352 struct icp_firml_dram_desc *dram_desc, 1353 unsigned int size) 1354 { 1355 void *vptr; 1356 dma_addr_t ptr; 1357 1358 vptr = dma_alloc_coherent(&handle->pci_dev->dev, 1359 size, &ptr, GFP_KERNEL); 1360 if (!vptr) 1361 return -ENOMEM; 1362 dram_desc->dram_base_addr_v = vptr; 1363 dram_desc->dram_bus_addr = ptr; 1364 dram_desc->dram_size = size; 1365 return 0; 1366 } 1367 1368 static void qat_uclo_simg_free(struct icp_qat_fw_loader_handle *handle, 1369 struct icp_firml_dram_desc *dram_desc) 1370 { 1371 if (handle && dram_desc && dram_desc->dram_base_addr_v) { 1372 dma_free_coherent(&handle->pci_dev->dev, 1373 (size_t)(dram_desc->dram_size), 1374 dram_desc->dram_base_addr_v, 1375 dram_desc->dram_bus_addr); 1376 } 1377 1378 if (dram_desc) 1379 memset(dram_desc, 0, sizeof(*dram_desc)); 1380 } 1381 1382 static void qat_uclo_ummap_auth_fw(struct icp_qat_fw_loader_handle *handle, 1383 struct icp_qat_fw_auth_desc **desc) 1384 { 1385 struct icp_firml_dram_desc dram_desc; 1386 1387 if (*desc) { 1388 dram_desc.dram_base_addr_v = *desc; 1389 dram_desc.dram_bus_addr = ((struct icp_qat_auth_chunk *) 1390 (*desc))->chunk_bus_addr; 1391 dram_desc.dram_size = ((struct icp_qat_auth_chunk *) 1392 (*desc))->chunk_size; 1393 qat_uclo_simg_free(handle, &dram_desc); 1394 } 1395 } 1396 1397 static int qat_uclo_check_image(struct icp_qat_fw_loader_handle *handle, 1398 void *image, unsigned int size, 1399 unsigned int fw_type) 1400 { 1401 char *fw_type_name = fw_type ? "MMP" : "AE"; 1402 unsigned int css_dword_size = sizeof(u32); 1403 unsigned int header_len, simg_type; 1404 struct icp_qat_css_hdr *css_hdr; 1405 1406 if (handle->chip_info->fw_auth) { 1407 header_len = qat_uclo_simg_hdr2sign_len(handle); 1408 simg_type = qat_uclo_simg_fw_type(handle, image); 1409 css_hdr = image; 1410 1411 if (handle->chip_info->dual_sign) { 1412 if (css_hdr->module_type != ICP_QAT_DUALSIGN_MODULE_TYPE) 1413 goto err; 1414 if (css_hdr->header_len != ICP_QAT_DUALSIGN_HDR_LEN) 1415 goto err; 1416 if (css_hdr->header_ver != ICP_QAT_DUALSIGN_HDR_VER) 1417 goto err; 1418 } else { 1419 if (css_hdr->header_len * css_dword_size != header_len) 1420 goto err; 1421 if (css_hdr->size * css_dword_size != size) 1422 goto err; 1423 if (size <= header_len) 1424 goto err; 1425 } 1426 1427 if (fw_type != simg_type) 1428 goto err; 1429 1430 size -= header_len; 1431 } 1432 1433 if (fw_type == CSS_AE_FIRMWARE) { 1434 if (size < sizeof(struct icp_qat_simg_ae_mode *) + 1435 ICP_QAT_SIMG_AE_INIT_SEQ_LEN) 1436 goto err; 1437 if (size > ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN) 1438 goto err; 1439 } else if (fw_type == CSS_MMP_FIRMWARE) { 1440 if (size > ICP_QAT_CSS_RSA3K_MAX_IMAGE_LEN) 1441 goto err; 1442 } else { 1443 pr_err("Unsupported firmware type\n"); 1444 return -EINVAL; 1445 } 1446 return 0; 1447 1448 err: 1449 pr_err("Invalid %s firmware image\n", fw_type_name); 1450 return -EINVAL; 1451 } 1452 1453 static int qat_uclo_build_auth_desc_RSA(struct icp_qat_fw_loader_handle *handle, 1454 char *image, unsigned int size, 1455 struct icp_firml_dram_desc *dram_desc, 1456 unsigned int fw_type, struct icp_qat_fw_auth_desc **desc) 1457 { 1458 struct icp_qat_css_hdr *css_hdr = (struct icp_qat_css_hdr *)image; 1459 struct icp_qat_simg_ae_mode *simg_ae_mode; 1460 struct icp_qat_fw_auth_desc *auth_desc; 1461 char *virt_addr, *virt_base; 1462 u64 bus_addr; 1463 1464 virt_base = dram_desc->dram_base_addr_v; 1465 virt_base += sizeof(struct icp_qat_auth_chunk); 1466 bus_addr = dram_desc->dram_bus_addr + sizeof(struct icp_qat_auth_chunk); 1467 auth_desc = dram_desc->dram_base_addr_v; 1468 auth_desc->css_hdr_high = upper_32_bits(bus_addr); 1469 auth_desc->css_hdr_low = lower_32_bits(bus_addr); 1470 virt_addr = virt_base; 1471 1472 memcpy(virt_addr, image, sizeof(*css_hdr)); 1473 /* pub key */ 1474 bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) + 1475 sizeof(*css_hdr); 1476 virt_addr = virt_addr + sizeof(*css_hdr); 1477 1478 auth_desc->fwsk_pub_high = upper_32_bits(bus_addr); 1479 auth_desc->fwsk_pub_low = lower_32_bits(bus_addr); 1480 1481 memcpy(virt_addr, image + sizeof(*css_hdr), ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)); 1482 /* padding */ 1483 memset((void *)(uintptr_t)(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)), 1484 0, ICP_QAT_CSS_FWSK_PAD_LEN(handle)); 1485 1486 /* exponent */ 1487 memcpy(virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) + 1488 ICP_QAT_CSS_FWSK_PAD_LEN(handle), image + sizeof(*css_hdr) + 1489 ICP_QAT_CSS_FWSK_MODULUS_LEN(handle), sizeof(unsigned int)); 1490 1491 /* signature */ 1492 bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high, 1493 auth_desc->fwsk_pub_low) + 1494 ICP_QAT_CSS_FWSK_PUB_LEN(handle); 1495 virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN(handle); 1496 auth_desc->signature_high = upper_32_bits(bus_addr); 1497 auth_desc->signature_low = lower_32_bits(bus_addr); 1498 1499 memcpy(virt_addr, image + sizeof(*css_hdr) + ICP_QAT_CSS_FWSK_MODULUS_LEN(handle) + 1500 ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle), ICP_QAT_CSS_SIGNATURE_LEN(handle)); 1501 1502 bus_addr = ADD_ADDR(auth_desc->signature_high, 1503 auth_desc->signature_low) + 1504 ICP_QAT_CSS_SIGNATURE_LEN(handle); 1505 virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle); 1506 1507 auth_desc->img_high = upper_32_bits(bus_addr); 1508 auth_desc->img_low = lower_32_bits(bus_addr); 1509 auth_desc->img_len = size - qat_uclo_simg_hdr2sign_len(handle); 1510 if (bus_addr + auth_desc->img_len > 1511 dram_desc->dram_bus_addr + ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN) { 1512 pr_err("insufficient memory size for authentication data\n"); 1513 qat_uclo_simg_free(handle, dram_desc); 1514 return -ENOMEM; 1515 } 1516 1517 memcpy(virt_addr, image + qat_uclo_simg_hdr2sign_len(handle), auth_desc->img_len); 1518 virt_addr = virt_base; 1519 /* AE firmware */ 1520 if (fw_type == CSS_AE_FIRMWARE) { 1521 auth_desc->img_ae_mode_data_high = auth_desc->img_high; 1522 auth_desc->img_ae_mode_data_low = auth_desc->img_low; 1523 bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high, 1524 auth_desc->img_ae_mode_data_low) + 1525 sizeof(struct icp_qat_simg_ae_mode); 1526 1527 auth_desc->img_ae_init_data_high = upper_32_bits(bus_addr); 1528 auth_desc->img_ae_init_data_low = lower_32_bits(bus_addr); 1529 bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN; 1530 auth_desc->img_ae_insts_high = upper_32_bits(bus_addr); 1531 auth_desc->img_ae_insts_low = lower_32_bits(bus_addr); 1532 virt_addr += sizeof(struct icp_qat_css_hdr); 1533 virt_addr += ICP_QAT_CSS_FWSK_PUB_LEN(handle); 1534 virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(handle); 1535 simg_ae_mode = (struct icp_qat_simg_ae_mode *)(uintptr_t)virt_addr; 1536 auth_desc->ae_mask = simg_ae_mode->ae_mask & handle->cfg_ae_mask; 1537 } else { 1538 auth_desc->img_ae_insts_high = auth_desc->img_high; 1539 auth_desc->img_ae_insts_low = auth_desc->img_low; 1540 } 1541 *desc = auth_desc; 1542 return 0; 1543 } 1544 1545 static int qat_uclo_build_auth_desc_dualsign(struct icp_qat_fw_loader_handle *handle, 1546 char *image, unsigned int size, 1547 struct icp_firml_dram_desc *dram_desc, 1548 unsigned int fw_type, 1549 struct icp_qat_fw_auth_desc **desc) 1550 { 1551 struct icp_qat_simg_ae_mode *simg_ae_mode; 1552 struct icp_qat_fw_auth_desc *auth_desc; 1553 unsigned int chunk_offset, img_offset; 1554 u64 bus_addr, addr; 1555 char *virt_addr; 1556 1557 virt_addr = dram_desc->dram_base_addr_v; 1558 virt_addr += sizeof(struct icp_qat_auth_chunk); 1559 bus_addr = dram_desc->dram_bus_addr + sizeof(struct icp_qat_auth_chunk); 1560 1561 auth_desc = dram_desc->dram_base_addr_v; 1562 auth_desc->img_len = size - qat_uclo_simg_hdr2sign_len(handle); 1563 auth_desc->css_hdr_high = upper_32_bits(bus_addr); 1564 auth_desc->css_hdr_low = lower_32_bits(bus_addr); 1565 memcpy(virt_addr, image, ICP_QAT_DUALSIGN_OPAQUE_HDR_LEN); 1566 1567 img_offset = ICP_QAT_DUALSIGN_OPAQUE_HDR_LEN; 1568 chunk_offset = ICP_QAT_DUALSIGN_OPAQUE_HDR_ALIGN_LEN; 1569 1570 /* RSA pub key */ 1571 addr = bus_addr + chunk_offset; 1572 auth_desc->fwsk_pub_high = upper_32_bits(addr); 1573 auth_desc->fwsk_pub_low = lower_32_bits(addr); 1574 memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_CSS_FWSK_MODULUS_LEN(handle)); 1575 1576 img_offset += ICP_QAT_CSS_FWSK_MODULUS_LEN(handle); 1577 chunk_offset += ICP_QAT_CSS_FWSK_MODULUS_LEN(handle); 1578 /* RSA padding */ 1579 memset(virt_addr + chunk_offset, 0, ICP_QAT_CSS_FWSK_PAD_LEN(handle)); 1580 1581 chunk_offset += ICP_QAT_CSS_FWSK_PAD_LEN(handle); 1582 /* RSA exponent */ 1583 memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle)); 1584 1585 img_offset += ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle); 1586 chunk_offset += ICP_QAT_CSS_FWSK_EXPONENT_LEN(handle); 1587 /* RSA signature */ 1588 addr = bus_addr + chunk_offset; 1589 auth_desc->signature_high = upper_32_bits(addr); 1590 auth_desc->signature_low = lower_32_bits(addr); 1591 memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_CSS_SIGNATURE_LEN(handle)); 1592 1593 img_offset += ICP_QAT_CSS_SIGNATURE_LEN(handle); 1594 chunk_offset += ICP_QAT_CSS_SIGNATURE_LEN(handle); 1595 /* XMSS pubkey */ 1596 addr = bus_addr + chunk_offset; 1597 auth_desc->xmss_pubkey_high = upper_32_bits(addr); 1598 auth_desc->xmss_pubkey_low = lower_32_bits(addr); 1599 memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_DUALSIGN_XMSS_PUBKEY_LEN); 1600 1601 img_offset += ICP_QAT_DUALSIGN_XMSS_PUBKEY_LEN; 1602 chunk_offset += ICP_QAT_DUALSIGN_XMSS_PUBKEY_LEN; 1603 /* XMSS signature */ 1604 addr = bus_addr + chunk_offset; 1605 auth_desc->xmss_sig_high = upper_32_bits(addr); 1606 auth_desc->xmss_sig_low = lower_32_bits(addr); 1607 memcpy(virt_addr + chunk_offset, image + img_offset, ICP_QAT_DUALSIGN_XMSS_SIG_LEN); 1608 1609 img_offset += ICP_QAT_DUALSIGN_XMSS_SIG_LEN; 1610 chunk_offset += ICP_QAT_DUALSIGN_XMSS_SIG_ALIGN_LEN; 1611 1612 if (dram_desc->dram_size < (chunk_offset + auth_desc->img_len)) { 1613 pr_err("auth chunk memory size is not enough to store data\n"); 1614 return -ENOMEM; 1615 } 1616 1617 /* Signed data */ 1618 addr = bus_addr + chunk_offset; 1619 auth_desc->img_high = upper_32_bits(addr); 1620 auth_desc->img_low = lower_32_bits(addr); 1621 memcpy(virt_addr + chunk_offset, image + img_offset, auth_desc->img_len); 1622 1623 chunk_offset += ICP_QAT_DUALSIGN_MISC_INFO_LEN; 1624 /* AE firmware */ 1625 if (fw_type == CSS_AE_FIRMWARE) { 1626 /* AE mode data */ 1627 addr = bus_addr + chunk_offset; 1628 auth_desc->img_ae_mode_data_high = upper_32_bits(addr); 1629 auth_desc->img_ae_mode_data_low = lower_32_bits(addr); 1630 simg_ae_mode = 1631 (struct icp_qat_simg_ae_mode *)(virt_addr + chunk_offset); 1632 auth_desc->ae_mask = simg_ae_mode->ae_mask & handle->cfg_ae_mask; 1633 1634 chunk_offset += sizeof(struct icp_qat_simg_ae_mode); 1635 /* AE init seq */ 1636 addr = bus_addr + chunk_offset; 1637 auth_desc->img_ae_init_data_high = upper_32_bits(addr); 1638 auth_desc->img_ae_init_data_low = lower_32_bits(addr); 1639 1640 chunk_offset += ICP_QAT_SIMG_AE_INIT_SEQ_LEN; 1641 /* AE instructions */ 1642 addr = bus_addr + chunk_offset; 1643 auth_desc->img_ae_insts_high = upper_32_bits(addr); 1644 auth_desc->img_ae_insts_low = lower_32_bits(addr); 1645 } else { 1646 addr = bus_addr + chunk_offset; 1647 auth_desc->img_ae_insts_high = upper_32_bits(addr); 1648 auth_desc->img_ae_insts_low = lower_32_bits(addr); 1649 } 1650 *desc = auth_desc; 1651 return 0; 1652 } 1653 1654 static int qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle, 1655 char *image, unsigned int size, 1656 struct icp_qat_fw_auth_desc **desc) 1657 { 1658 struct icp_qat_auth_chunk *auth_chunk; 1659 struct icp_firml_dram_desc img_desc; 1660 unsigned int simg_fw_type; 1661 int ret; 1662 1663 ret = qat_uclo_simg_alloc(handle, &img_desc, ICP_QAT_CSS_RSA4K_MAX_IMAGE_LEN); 1664 if (ret) 1665 return ret; 1666 1667 simg_fw_type = qat_uclo_simg_fw_type(handle, image); 1668 auth_chunk = img_desc.dram_base_addr_v; 1669 auth_chunk->chunk_size = img_desc.dram_size; 1670 auth_chunk->chunk_bus_addr = img_desc.dram_bus_addr; 1671 1672 if (handle->chip_info->dual_sign) 1673 return qat_uclo_build_auth_desc_dualsign(handle, image, size, &img_desc, 1674 simg_fw_type, desc); 1675 1676 return qat_uclo_build_auth_desc_RSA(handle, image, size, &img_desc, 1677 simg_fw_type, desc); 1678 } 1679 1680 static int qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle, 1681 struct icp_qat_fw_auth_desc *desc) 1682 { 1683 unsigned long ae_mask = handle->hal_handle->ae_mask; 1684 u32 fcu_sts_csr, fcu_ctl_csr; 1685 u32 loaded_aes, loaded_csr; 1686 unsigned int i; 1687 u32 fcu_sts; 1688 1689 fcu_ctl_csr = handle->chip_info->fcu_ctl_csr; 1690 fcu_sts_csr = handle->chip_info->fcu_sts_csr; 1691 loaded_csr = handle->chip_info->fcu_loaded_ae_csr; 1692 1693 for_each_set_bit(i, &ae_mask, handle->hal_handle->ae_max_num) { 1694 int retry = 0; 1695 1696 if (!((desc->ae_mask >> i) & 0x1)) 1697 continue; 1698 if (qat_hal_check_ae_active(handle, i)) { 1699 pr_err("AE %d is active\n", i); 1700 return -EINVAL; 1701 } 1702 SET_CAP_CSR(handle, fcu_ctl_csr, 1703 (FCU_CTRL_CMD_LOAD | 1704 (1 << FCU_CTRL_BROADCAST_POS) | 1705 (i << FCU_CTRL_AE_POS))); 1706 1707 do { 1708 msleep(FW_AUTH_WAIT_PERIOD); 1709 fcu_sts = GET_CAP_CSR(handle, fcu_sts_csr); 1710 if ((fcu_sts & FCU_AUTH_STS_MASK) == 1711 FCU_STS_LOAD_DONE) { 1712 loaded_aes = GET_CAP_CSR(handle, loaded_csr); 1713 loaded_aes >>= handle->chip_info->fcu_loaded_ae_pos; 1714 if (loaded_aes & (1 << i)) 1715 break; 1716 } 1717 } while (retry++ < FW_AUTH_MAX_RETRY); 1718 if (retry > FW_AUTH_MAX_RETRY) { 1719 pr_err("firmware load failed timeout %x\n", retry); 1720 return -EINVAL; 1721 } 1722 } 1723 return 0; 1724 } 1725 1726 static int qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle, 1727 void *addr_ptr, int mem_size) 1728 { 1729 struct icp_qat_suof_handle *suof_handle; 1730 1731 suof_handle = kzalloc(sizeof(*suof_handle), GFP_KERNEL); 1732 if (!suof_handle) 1733 return -ENOMEM; 1734 handle->sobj_handle = suof_handle; 1735 if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) { 1736 qat_uclo_del_suof(handle); 1737 pr_err("map SUOF failed\n"); 1738 return -EINVAL; 1739 } 1740 return 0; 1741 } 1742 1743 int qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, 1744 void *addr_ptr, int mem_size) 1745 { 1746 struct icp_qat_fw_auth_desc *desc = NULL; 1747 int status = 0; 1748 int ret; 1749 1750 ret = qat_uclo_check_image(handle, addr_ptr, mem_size, CSS_MMP_FIRMWARE); 1751 if (ret) 1752 return ret; 1753 1754 if (handle->chip_info->fw_auth) { 1755 status = qat_uclo_map_auth_fw(handle, addr_ptr, mem_size, &desc); 1756 if (!status) 1757 status = qat_uclo_auth_fw(handle, desc); 1758 qat_uclo_ummap_auth_fw(handle, &desc); 1759 } else { 1760 if (handle->chip_info->mmp_sram_size < mem_size) { 1761 pr_err("MMP size is too large: 0x%x\n", mem_size); 1762 return -EFBIG; 1763 } 1764 qat_uclo_wr_sram_by_words(handle, 0, addr_ptr, mem_size); 1765 } 1766 return status; 1767 } 1768 1769 static int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, 1770 void *addr_ptr, int mem_size) 1771 { 1772 struct icp_qat_uof_filehdr *filehdr; 1773 struct icp_qat_uclo_objhandle *objhdl; 1774 1775 objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL); 1776 if (!objhdl) 1777 return -ENOMEM; 1778 objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL); 1779 if (!objhdl->obj_buf) 1780 goto out_objbuf_err; 1781 filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf; 1782 if (qat_uclo_check_uof_format(filehdr)) 1783 goto out_objhdr_err; 1784 objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr, 1785 ICP_QAT_UOF_OBJS); 1786 if (!objhdl->obj_hdr) { 1787 pr_err("object file chunk is null\n"); 1788 goto out_objhdr_err; 1789 } 1790 handle->obj_handle = objhdl; 1791 if (qat_uclo_parse_uof_obj(handle)) 1792 goto out_overlay_obj_err; 1793 return 0; 1794 1795 out_overlay_obj_err: 1796 handle->obj_handle = NULL; 1797 kfree(objhdl->obj_hdr); 1798 out_objhdr_err: 1799 kfree(objhdl->obj_buf); 1800 out_objbuf_err: 1801 kfree(objhdl); 1802 return -ENOMEM; 1803 } 1804 1805 static int qat_uclo_map_mof_file_hdr(struct icp_qat_fw_loader_handle *handle, 1806 struct icp_qat_mof_file_hdr *mof_ptr, 1807 u32 mof_size) 1808 { 1809 struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle; 1810 unsigned int min_ver_offset; 1811 unsigned int checksum; 1812 1813 mobj_handle->file_id = ICP_QAT_MOF_FID; 1814 mobj_handle->mof_buf = (char *)mof_ptr; 1815 mobj_handle->mof_size = mof_size; 1816 1817 min_ver_offset = mof_size - offsetof(struct icp_qat_mof_file_hdr, 1818 min_ver); 1819 checksum = qat_uclo_calc_str_checksum(&mof_ptr->min_ver, 1820 min_ver_offset); 1821 if (checksum != mof_ptr->checksum) { 1822 pr_err("incorrect MOF checksum\n"); 1823 return -EINVAL; 1824 } 1825 1826 mobj_handle->checksum = mof_ptr->checksum; 1827 mobj_handle->min_ver = mof_ptr->min_ver; 1828 mobj_handle->maj_ver = mof_ptr->maj_ver; 1829 return 0; 1830 } 1831 1832 static void qat_uclo_del_mof(struct icp_qat_fw_loader_handle *handle) 1833 { 1834 struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle; 1835 1836 kfree(mobj_handle->obj_table.obj_hdr); 1837 mobj_handle->obj_table.obj_hdr = NULL; 1838 kfree(handle->mobj_handle); 1839 handle->mobj_handle = NULL; 1840 } 1841 1842 static int qat_uclo_seek_obj_inside_mof(struct icp_qat_mof_handle *mobj_handle, 1843 const char *obj_name, char **obj_ptr, 1844 unsigned int *obj_size) 1845 { 1846 struct icp_qat_mof_objhdr *obj_hdr = mobj_handle->obj_table.obj_hdr; 1847 unsigned int i; 1848 1849 for (i = 0; i < mobj_handle->obj_table.num_objs; i++) { 1850 if (!strncmp(obj_hdr[i].obj_name, obj_name, 1851 ICP_QAT_SUOF_OBJ_NAME_LEN)) { 1852 *obj_ptr = obj_hdr[i].obj_buf; 1853 *obj_size = obj_hdr[i].obj_size; 1854 return 0; 1855 } 1856 } 1857 1858 pr_err("object %s is not found inside MOF\n", obj_name); 1859 return -EINVAL; 1860 } 1861 1862 static int qat_uclo_map_obj_from_mof(struct icp_qat_mof_handle *mobj_handle, 1863 struct icp_qat_mof_objhdr *mobj_hdr, 1864 struct icp_qat_mof_obj_chunkhdr *obj_chunkhdr) 1865 { 1866 u8 *obj; 1867 1868 if (!strncmp(obj_chunkhdr->chunk_id, ICP_QAT_UOF_IMAG, 1869 ICP_QAT_MOF_OBJ_CHUNKID_LEN)) { 1870 obj = mobj_handle->uobjs_hdr + obj_chunkhdr->offset; 1871 } else if (!strncmp(obj_chunkhdr->chunk_id, ICP_QAT_SUOF_IMAG, 1872 ICP_QAT_MOF_OBJ_CHUNKID_LEN)) { 1873 obj = mobj_handle->sobjs_hdr + obj_chunkhdr->offset; 1874 } else { 1875 pr_err("unsupported chunk id\n"); 1876 return -EINVAL; 1877 } 1878 mobj_hdr->obj_buf = obj; 1879 mobj_hdr->obj_size = (unsigned int)obj_chunkhdr->size; 1880 mobj_hdr->obj_name = obj_chunkhdr->name + mobj_handle->sym_str; 1881 return 0; 1882 } 1883 1884 static int qat_uclo_map_objs_from_mof(struct icp_qat_mof_handle *mobj_handle) 1885 { 1886 struct icp_qat_mof_obj_chunkhdr *uobj_chunkhdr; 1887 struct icp_qat_mof_obj_chunkhdr *sobj_chunkhdr; 1888 struct icp_qat_mof_obj_hdr *uobj_hdr; 1889 struct icp_qat_mof_obj_hdr *sobj_hdr; 1890 struct icp_qat_mof_objhdr *mobj_hdr; 1891 unsigned int uobj_chunk_num = 0; 1892 unsigned int sobj_chunk_num = 0; 1893 unsigned int *valid_chunk; 1894 int ret, i; 1895 1896 uobj_hdr = (struct icp_qat_mof_obj_hdr *)mobj_handle->uobjs_hdr; 1897 sobj_hdr = (struct icp_qat_mof_obj_hdr *)mobj_handle->sobjs_hdr; 1898 if (uobj_hdr) 1899 uobj_chunk_num = uobj_hdr->num_chunks; 1900 if (sobj_hdr) 1901 sobj_chunk_num = sobj_hdr->num_chunks; 1902 1903 mobj_hdr = kzalloc((uobj_chunk_num + sobj_chunk_num) * 1904 sizeof(*mobj_hdr), GFP_KERNEL); 1905 if (!mobj_hdr) 1906 return -ENOMEM; 1907 1908 mobj_handle->obj_table.obj_hdr = mobj_hdr; 1909 valid_chunk = &mobj_handle->obj_table.num_objs; 1910 uobj_chunkhdr = (struct icp_qat_mof_obj_chunkhdr *) 1911 ((uintptr_t)uobj_hdr + sizeof(*uobj_hdr)); 1912 sobj_chunkhdr = (struct icp_qat_mof_obj_chunkhdr *) 1913 ((uintptr_t)sobj_hdr + sizeof(*sobj_hdr)); 1914 1915 /* map uof objects */ 1916 for (i = 0; i < uobj_chunk_num; i++) { 1917 ret = qat_uclo_map_obj_from_mof(mobj_handle, 1918 &mobj_hdr[*valid_chunk], 1919 &uobj_chunkhdr[i]); 1920 if (ret) 1921 return ret; 1922 (*valid_chunk)++; 1923 } 1924 1925 /* map suof objects */ 1926 for (i = 0; i < sobj_chunk_num; i++) { 1927 ret = qat_uclo_map_obj_from_mof(mobj_handle, 1928 &mobj_hdr[*valid_chunk], 1929 &sobj_chunkhdr[i]); 1930 if (ret) 1931 return ret; 1932 (*valid_chunk)++; 1933 } 1934 1935 if ((uobj_chunk_num + sobj_chunk_num) != *valid_chunk) { 1936 pr_err("inconsistent UOF/SUOF chunk amount\n"); 1937 return -EINVAL; 1938 } 1939 return 0; 1940 } 1941 1942 static void qat_uclo_map_mof_symobjs(struct icp_qat_mof_handle *mobj_handle, 1943 struct icp_qat_mof_chunkhdr *mof_chunkhdr) 1944 { 1945 char **sym_str = (char **)&mobj_handle->sym_str; 1946 unsigned int *sym_size = &mobj_handle->sym_size; 1947 struct icp_qat_mof_str_table *str_table_obj; 1948 1949 *sym_size = *(unsigned int *)(uintptr_t) 1950 (mof_chunkhdr->offset + mobj_handle->mof_buf); 1951 *sym_str = (char *)(uintptr_t) 1952 (mobj_handle->mof_buf + mof_chunkhdr->offset + 1953 sizeof(str_table_obj->tab_len)); 1954 } 1955 1956 static void qat_uclo_map_mof_chunk(struct icp_qat_mof_handle *mobj_handle, 1957 struct icp_qat_mof_chunkhdr *mof_chunkhdr) 1958 { 1959 char *chunk_id = mof_chunkhdr->chunk_id; 1960 1961 if (!strncmp(chunk_id, ICP_QAT_MOF_SYM_OBJS, ICP_QAT_MOF_OBJ_ID_LEN)) 1962 qat_uclo_map_mof_symobjs(mobj_handle, mof_chunkhdr); 1963 else if (!strncmp(chunk_id, ICP_QAT_UOF_OBJS, ICP_QAT_MOF_OBJ_ID_LEN)) 1964 mobj_handle->uobjs_hdr = mobj_handle->mof_buf + 1965 mof_chunkhdr->offset; 1966 else if (!strncmp(chunk_id, ICP_QAT_SUOF_OBJS, ICP_QAT_MOF_OBJ_ID_LEN)) 1967 mobj_handle->sobjs_hdr = mobj_handle->mof_buf + 1968 mof_chunkhdr->offset; 1969 } 1970 1971 static int qat_uclo_check_mof_format(struct icp_qat_mof_file_hdr *mof_hdr) 1972 { 1973 int maj = mof_hdr->maj_ver & 0xff; 1974 int min = mof_hdr->min_ver & 0xff; 1975 1976 if (mof_hdr->file_id != ICP_QAT_MOF_FID) { 1977 pr_err("invalid header 0x%x\n", mof_hdr->file_id); 1978 return -EINVAL; 1979 } 1980 1981 if (mof_hdr->num_chunks <= 0x1) { 1982 pr_err("MOF chunk amount is incorrect\n"); 1983 return -EINVAL; 1984 } 1985 if (maj != ICP_QAT_MOF_MAJVER || min != ICP_QAT_MOF_MINVER) { 1986 pr_err("bad MOF version, major 0x%x, minor 0x%x\n", maj, min); 1987 return -EINVAL; 1988 } 1989 return 0; 1990 } 1991 1992 static int qat_uclo_map_mof_obj(struct icp_qat_fw_loader_handle *handle, 1993 struct icp_qat_mof_file_hdr *mof_ptr, 1994 u32 mof_size, const char *obj_name, 1995 char **obj_ptr, unsigned int *obj_size) 1996 { 1997 struct icp_qat_mof_chunkhdr *mof_chunkhdr; 1998 unsigned int file_id = mof_ptr->file_id; 1999 struct icp_qat_mof_handle *mobj_handle; 2000 unsigned short chunks_num; 2001 unsigned int i; 2002 int ret; 2003 2004 if (file_id == ICP_QAT_UOF_FID || file_id == ICP_QAT_SUOF_FID) { 2005 if (obj_ptr) 2006 *obj_ptr = (char *)mof_ptr; 2007 if (obj_size) 2008 *obj_size = mof_size; 2009 return 0; 2010 } 2011 if (qat_uclo_check_mof_format(mof_ptr)) 2012 return -EINVAL; 2013 2014 mobj_handle = kzalloc(sizeof(*mobj_handle), GFP_KERNEL); 2015 if (!mobj_handle) 2016 return -ENOMEM; 2017 2018 handle->mobj_handle = mobj_handle; 2019 ret = qat_uclo_map_mof_file_hdr(handle, mof_ptr, mof_size); 2020 if (ret) 2021 return ret; 2022 2023 mof_chunkhdr = (void *)mof_ptr + sizeof(*mof_ptr); 2024 chunks_num = mof_ptr->num_chunks; 2025 2026 /* Parse MOF file chunks */ 2027 for (i = 0; i < chunks_num; i++) 2028 qat_uclo_map_mof_chunk(mobj_handle, &mof_chunkhdr[i]); 2029 2030 /* All sym_objs uobjs and sobjs should be available */ 2031 if (!mobj_handle->sym_str || 2032 (!mobj_handle->uobjs_hdr && !mobj_handle->sobjs_hdr)) 2033 return -EINVAL; 2034 2035 ret = qat_uclo_map_objs_from_mof(mobj_handle); 2036 if (ret) 2037 return ret; 2038 2039 /* Seek specified uof object in MOF */ 2040 return qat_uclo_seek_obj_inside_mof(mobj_handle, obj_name, 2041 obj_ptr, obj_size); 2042 } 2043 2044 int qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle, 2045 void *addr_ptr, u32 mem_size, const char *obj_name) 2046 { 2047 char *obj_addr; 2048 u32 obj_size; 2049 int ret; 2050 2051 BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >= 2052 (sizeof(handle->hal_handle->ae_mask) * 8)); 2053 2054 if (!handle || !addr_ptr || mem_size < 24) 2055 return -EINVAL; 2056 2057 if (obj_name) { 2058 ret = qat_uclo_map_mof_obj(handle, addr_ptr, mem_size, obj_name, 2059 &obj_addr, &obj_size); 2060 if (ret) 2061 return ret; 2062 } else { 2063 obj_addr = addr_ptr; 2064 obj_size = mem_size; 2065 } 2066 2067 return (handle->chip_info->fw_auth) ? 2068 qat_uclo_map_suof_obj(handle, obj_addr, obj_size) : 2069 qat_uclo_map_uof_obj(handle, obj_addr, obj_size); 2070 } 2071 2072 void qat_uclo_del_obj(struct icp_qat_fw_loader_handle *handle) 2073 { 2074 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 2075 unsigned int a; 2076 2077 if (handle->mobj_handle) 2078 qat_uclo_del_mof(handle); 2079 if (handle->sobj_handle) 2080 qat_uclo_del_suof(handle); 2081 if (!obj_handle) 2082 return; 2083 2084 kfree(obj_handle->uword_buf); 2085 for (a = 0; a < obj_handle->uimage_num; a++) 2086 kfree(obj_handle->ae_uimage[a].page); 2087 2088 for (a = 0; a < handle->hal_handle->ae_max_num; a++) 2089 qat_uclo_free_ae_data(&obj_handle->ae_data[a]); 2090 2091 kfree(obj_handle->obj_hdr); 2092 kfree(obj_handle->obj_buf); 2093 kfree(obj_handle); 2094 handle->obj_handle = NULL; 2095 } 2096 2097 static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle, 2098 struct icp_qat_uclo_encap_page *encap_page, 2099 u64 *uword, unsigned int addr_p, 2100 unsigned int raddr, u64 fill) 2101 { 2102 unsigned int i, addr; 2103 u64 uwrd = 0; 2104 2105 if (!encap_page) { 2106 *uword = fill; 2107 return; 2108 } 2109 addr = (encap_page->page_region) ? raddr : addr_p; 2110 for (i = 0; i < encap_page->uwblock_num; i++) { 2111 if (addr >= encap_page->uwblock[i].start_addr && 2112 addr <= encap_page->uwblock[i].start_addr + 2113 encap_page->uwblock[i].words_num - 1) { 2114 addr -= encap_page->uwblock[i].start_addr; 2115 addr *= obj_handle->uword_in_bytes; 2116 memcpy(&uwrd, (void *)(((uintptr_t) 2117 encap_page->uwblock[i].micro_words) + addr), 2118 obj_handle->uword_in_bytes); 2119 uwrd = uwrd & GENMASK_ULL(43, 0); 2120 } 2121 } 2122 *uword = uwrd; 2123 if (*uword == INVLD_UWORD) 2124 *uword = fill; 2125 } 2126 2127 static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle, 2128 struct icp_qat_uclo_encap_page 2129 *encap_page, unsigned int ae) 2130 { 2131 unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen; 2132 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 2133 u64 fill_pat; 2134 2135 /* load the page starting at appropriate ustore address */ 2136 /* get fill-pattern from an image -- they are all the same */ 2137 memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern, 2138 sizeof(u64)); 2139 uw_physical_addr = encap_page->beg_addr_p; 2140 uw_relative_addr = 0; 2141 words_num = encap_page->micro_words_num; 2142 while (words_num) { 2143 cpylen = min(words_num, UWORD_CPYBUF_SIZE); 2144 2145 /* load the buffer */ 2146 for (i = 0; i < cpylen; i++) 2147 qat_uclo_fill_uwords(obj_handle, encap_page, 2148 &obj_handle->uword_buf[i], 2149 uw_physical_addr + i, 2150 uw_relative_addr + i, fill_pat); 2151 2152 /* copy the buffer to ustore */ 2153 qat_hal_wr_uwords(handle, (unsigned char)ae, 2154 uw_physical_addr, cpylen, 2155 obj_handle->uword_buf); 2156 2157 uw_physical_addr += cpylen; 2158 uw_relative_addr += cpylen; 2159 words_num -= cpylen; 2160 } 2161 } 2162 2163 static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle, 2164 struct icp_qat_uof_image *image) 2165 { 2166 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 2167 unsigned long ae_mask = handle->hal_handle->ae_mask; 2168 unsigned long cfg_ae_mask = handle->cfg_ae_mask; 2169 unsigned long ae_assigned = image->ae_assigned; 2170 struct icp_qat_uclo_aedata *aed; 2171 unsigned int ctx_mask, s; 2172 struct icp_qat_uclo_page *page; 2173 unsigned char ae; 2174 int ctx; 2175 2176 if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX) 2177 ctx_mask = 0xff; 2178 else 2179 ctx_mask = 0x55; 2180 /* load the default page and set assigned CTX PC 2181 * to the entrypoint address */ 2182 for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) { 2183 if (!test_bit(ae, &cfg_ae_mask)) 2184 continue; 2185 2186 if (!test_bit(ae, &ae_assigned)) 2187 continue; 2188 2189 aed = &obj_handle->ae_data[ae]; 2190 /* find the slice to which this image is assigned */ 2191 for (s = 0; s < aed->slice_num; s++) { 2192 if (image->ctx_assigned & 2193 aed->ae_slices[s].ctx_mask_assigned) 2194 break; 2195 } 2196 if (s >= aed->slice_num) 2197 continue; 2198 page = aed->ae_slices[s].page; 2199 if (!page->encap_page->def_page) 2200 continue; 2201 qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae); 2202 2203 page = aed->ae_slices[s].page; 2204 for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) 2205 aed->ae_slices[s].cur_page[ctx] = 2206 (ctx_mask & (1 << ctx)) ? page : NULL; 2207 qat_hal_set_live_ctx(handle, (unsigned char)ae, 2208 image->ctx_assigned); 2209 qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned, 2210 image->entry_address); 2211 } 2212 } 2213 2214 static int qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle) 2215 { 2216 unsigned int i; 2217 struct icp_qat_fw_auth_desc *desc = NULL; 2218 struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle; 2219 struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr; 2220 int ret; 2221 2222 for (i = 0; i < sobj_handle->img_table.num_simgs; i++) { 2223 ret = qat_uclo_check_image(handle, simg_hdr[i].simg_buf, 2224 simg_hdr[i].simg_len, 2225 CSS_AE_FIRMWARE); 2226 if (ret) 2227 return ret; 2228 2229 if (qat_uclo_map_auth_fw(handle, 2230 (char *)simg_hdr[i].simg_buf, 2231 (unsigned int) 2232 simg_hdr[i].simg_len, 2233 &desc)) 2234 goto wr_err; 2235 if (qat_uclo_auth_fw(handle, desc)) 2236 goto wr_err; 2237 if (qat_uclo_is_broadcast(handle, i)) { 2238 if (qat_uclo_broadcast_load_fw(handle, desc)) 2239 goto wr_err; 2240 } else { 2241 if (qat_uclo_load_fw(handle, desc)) 2242 goto wr_err; 2243 } 2244 qat_uclo_ummap_auth_fw(handle, &desc); 2245 } 2246 return 0; 2247 wr_err: 2248 qat_uclo_ummap_auth_fw(handle, &desc); 2249 return -EINVAL; 2250 } 2251 2252 static int qat_uclo_wr_uof_img(struct icp_qat_fw_loader_handle *handle) 2253 { 2254 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 2255 unsigned int i; 2256 2257 if (qat_uclo_init_globals(handle)) 2258 return -EINVAL; 2259 for (i = 0; i < obj_handle->uimage_num; i++) { 2260 if (!obj_handle->ae_uimage[i].img_ptr) 2261 return -EINVAL; 2262 if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i])) 2263 return -EINVAL; 2264 qat_uclo_wr_uimage_page(handle, 2265 obj_handle->ae_uimage[i].img_ptr); 2266 } 2267 return 0; 2268 } 2269 2270 int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle) 2271 { 2272 return (handle->chip_info->fw_auth) ? qat_uclo_wr_suof_img(handle) : 2273 qat_uclo_wr_uof_img(handle); 2274 } 2275 2276 int qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle, 2277 unsigned int cfg_ae_mask) 2278 { 2279 if (!cfg_ae_mask) 2280 return -EINVAL; 2281 2282 handle->cfg_ae_mask = cfg_ae_mask; 2283 return 0; 2284 } 2285