1 /* SPDX-License-Identifier: BSD-3-Clause */ 2 /* Copyright(c) 2007-2025 Intel Corporation */ 3 #include "qat_freebsd.h" 4 #include "adf_cfg.h" 5 #include "adf_common_drv.h" 6 #include "adf_accel_devices.h" 7 #include "icp_qat_uclo.h" 8 #include "icp_qat_fw.h" 9 #include "icp_qat_fw_init_admin.h" 10 #include "adf_cfg_strings.h" 11 #include "adf_transport_access_macros.h" 12 #include "adf_transport_internal.h" 13 #include <sys/ctype.h> 14 #include <sys/kernel.h> 15 #include <linux/delay.h> 16 #include "adf_accel_devices.h" 17 #include "adf_common_drv.h" 18 #include "icp_qat_uclo.h" 19 #include "icp_qat_hal.h" 20 #include "icp_qat_fw_loader_handle.h" 21 22 #define UWORD_CPYBUF_SIZE 1024 23 #define INVLD_UWORD 0xffffffffffull 24 #define PID_MINOR_REV 0xf 25 #define PID_MAJOR_REV (0xf << 4) 26 #define MAX_UINT32_VAL 0xfffffffful 27 28 static int 29 qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle, 30 unsigned int ae, 31 unsigned int image_num) 32 { 33 struct icp_qat_uclo_aedata *ae_data; 34 struct icp_qat_uclo_encapme *encap_image; 35 struct icp_qat_uclo_page *page = NULL; 36 struct icp_qat_uclo_aeslice *ae_slice = NULL; 37 38 ae_data = &obj_handle->ae_data[ae]; 39 encap_image = &obj_handle->ae_uimage[image_num]; 40 ae_slice = &ae_data->ae_slices[ae_data->slice_num]; 41 ae_slice->encap_image = encap_image; 42 43 if (encap_image->img_ptr) { 44 ae_slice->ctx_mask_assigned = 45 encap_image->img_ptr->ctx_assigned; 46 ae_data->shareable_ustore = 47 ICP_QAT_SHARED_USTORE_MODE(encap_image->img_ptr->ae_mode); 48 if (obj_handle->prod_type == ICP_QAT_AC_4XXX_A_DEV_TYPE) 49 ae_data->eff_ustore_size = obj_handle->ustore_phy_size; 50 else { 51 ae_data->eff_ustore_size = ae_data->shareable_ustore ? 52 (obj_handle->ustore_phy_size << 1) : 53 obj_handle->ustore_phy_size; 54 } 55 } else { 56 ae_slice->ctx_mask_assigned = 0; 57 } 58 ae_slice->region = 59 malloc(sizeof(*ae_slice->region), M_QAT, M_WAITOK | M_ZERO); 60 ae_slice->page = 61 malloc(sizeof(*ae_slice->page), M_QAT, M_WAITOK | M_ZERO); 62 page = ae_slice->page; 63 page->encap_page = encap_image->page; 64 ae_slice->page->region = ae_slice->region; 65 ae_data->slice_num++; 66 return 0; 67 } 68 69 static int 70 qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data) 71 { 72 unsigned int i; 73 74 if (!ae_data) { 75 pr_err("QAT: bad argument, ae_data is NULL\n "); 76 return EINVAL; 77 } 78 79 for (i = 0; i < ae_data->slice_num; i++) { 80 free(ae_data->ae_slices[i].region, M_QAT); 81 ae_data->ae_slices[i].region = NULL; 82 free(ae_data->ae_slices[i].page, M_QAT); 83 ae_data->ae_slices[i].page = NULL; 84 } 85 return 0; 86 } 87 88 static char * 89 qat_uclo_get_string(struct icp_qat_uof_strtable *str_table, 90 unsigned int str_offset) 91 { 92 if (!str_table->table_len || str_offset > str_table->table_len) 93 return NULL; 94 return (char *)(((uintptr_t)(str_table->strings)) + str_offset); 95 } 96 97 static int 98 qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr) 99 { 100 int maj = hdr->maj_ver & 0xff; 101 int min = hdr->min_ver & 0xff; 102 103 if (hdr->file_id != ICP_QAT_UOF_FID) { 104 pr_err("QAT: Invalid header 0x%x\n", hdr->file_id); 105 return EINVAL; 106 } 107 if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) { 108 pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n", 109 maj, 110 min); 111 return EINVAL; 112 } 113 return 0; 114 } 115 116 static int 117 qat_uclo_check_suof_format(const struct icp_qat_suof_filehdr *suof_hdr) 118 { 119 int maj = suof_hdr->maj_ver & 0xff; 120 int min = suof_hdr->min_ver & 0xff; 121 122 if (suof_hdr->file_id != ICP_QAT_SUOF_FID) { 123 pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id); 124 return EINVAL; 125 } 126 if (suof_hdr->fw_type != 0) { 127 pr_err("QAT: unsupported firmware type\n"); 128 return EINVAL; 129 } 130 if (suof_hdr->num_chunks <= 0x1) { 131 pr_err("QAT: SUOF chunk amount is incorrect\n"); 132 return EINVAL; 133 } 134 if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) { 135 pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n", 136 maj, 137 min); 138 return EINVAL; 139 } 140 return 0; 141 } 142 143 static int 144 qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle, 145 unsigned int addr, 146 const unsigned int *val, 147 unsigned int num_in_bytes) 148 { 149 unsigned int outval; 150 const unsigned char *ptr = (const unsigned char *)val; 151 152 if (num_in_bytes > handle->hal_sram_size) { 153 pr_err("QAT: error, mmp size overflow %d\n", num_in_bytes); 154 return EINVAL; 155 } 156 while (num_in_bytes) { 157 memcpy(&outval, ptr, 4); 158 SRAM_WRITE(handle, addr, outval); 159 num_in_bytes -= 4; 160 ptr += 4; 161 addr += 4; 162 } 163 return 0; 164 } 165 166 static void 167 qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle, 168 unsigned char ae, 169 unsigned int addr, 170 unsigned int *val, 171 unsigned int num_in_bytes) 172 { 173 unsigned int outval; 174 unsigned char *ptr = (unsigned char *)val; 175 176 addr >>= 0x2; /* convert to uword address */ 177 178 while (num_in_bytes) { 179 memcpy(&outval, ptr, 4); 180 qat_hal_wr_umem(handle, ae, addr++, 1, &outval); 181 num_in_bytes -= 4; 182 ptr += 4; 183 } 184 } 185 186 static void 187 qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle, 188 unsigned char ae, 189 struct icp_qat_uof_batch_init *umem_init_header) 190 { 191 struct icp_qat_uof_batch_init *umem_init; 192 193 if (!umem_init_header) 194 return; 195 umem_init = umem_init_header->next; 196 while (umem_init) { 197 unsigned int addr, *value, size; 198 199 ae = umem_init->ae; 200 addr = umem_init->addr; 201 value = umem_init->value; 202 size = umem_init->size; 203 qat_uclo_wr_umem_by_words(handle, ae, addr, value, size); 204 umem_init = umem_init->next; 205 } 206 } 207 208 static void 209 qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle, 210 struct icp_qat_uof_batch_init **base) 211 { 212 struct icp_qat_uof_batch_init *umem_init; 213 214 umem_init = *base; 215 while (umem_init) { 216 struct icp_qat_uof_batch_init *pre; 217 218 pre = umem_init; 219 umem_init = umem_init->next; 220 free(pre, M_QAT); 221 } 222 *base = NULL; 223 } 224 225 static int 226 qat_uclo_parse_num(char *str, unsigned int *num) 227 { 228 char buf[16] = { 0 }; 229 unsigned long ae = 0; 230 int i; 231 232 strncpy(buf, str, 15); 233 for (i = 0; i < 16; i++) { 234 if (!isdigit(buf[i])) { 235 buf[i] = '\0'; 236 break; 237 } 238 } 239 if ((compat_strtoul(buf, 10, &ae))) 240 return EFAULT; 241 242 if (ae > MAX_UINT32_VAL) 243 return EFAULT; 244 245 *num = (unsigned int)ae; 246 return 0; 247 } 248 249 static int 250 qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle, 251 struct icp_qat_uof_initmem *init_mem, 252 unsigned int size_range, 253 unsigned int *ae) 254 { 255 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 256 char *str; 257 258 if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) { 259 pr_err("QAT: initmem is out of range"); 260 return EINVAL; 261 } 262 if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) { 263 pr_err("QAT: Memory scope for init_mem error\n"); 264 return EINVAL; 265 } 266 str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name); 267 if (!str) { 268 pr_err("QAT: AE name assigned in UOF init table is NULL\n"); 269 return EINVAL; 270 } 271 if (qat_uclo_parse_num(str, ae)) { 272 pr_err("QAT: Parse num for AE number failed\n"); 273 return EINVAL; 274 } 275 if (*ae >= ICP_QAT_UCLO_MAX_AE) { 276 pr_err("QAT: ae %d out of range\n", *ae); 277 return EINVAL; 278 } 279 return 0; 280 } 281 282 static int 283 qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle *handle, 284 struct icp_qat_uof_initmem *init_mem, 285 unsigned int ae, 286 struct icp_qat_uof_batch_init **init_tab_base) 287 { 288 struct icp_qat_uof_batch_init *init_header, *tail; 289 struct icp_qat_uof_batch_init *mem_init, *tail_old; 290 struct icp_qat_uof_memvar_attr *mem_val_attr; 291 unsigned int i = 0; 292 293 mem_val_attr = 294 (struct icp_qat_uof_memvar_attr *)((uintptr_t)init_mem + 295 sizeof( 296 struct icp_qat_uof_initmem)); 297 298 init_header = *init_tab_base; 299 if (!init_header) { 300 init_header = 301 malloc(sizeof(*init_header), M_QAT, M_WAITOK | M_ZERO); 302 init_header->size = 1; 303 *init_tab_base = init_header; 304 } 305 tail_old = init_header; 306 while (tail_old->next) 307 tail_old = tail_old->next; 308 tail = tail_old; 309 for (i = 0; i < init_mem->val_attr_num; i++) { 310 mem_init = malloc(sizeof(*mem_init), M_QAT, M_WAITOK | M_ZERO); 311 mem_init->ae = ae; 312 mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte; 313 mem_init->value = &mem_val_attr->value; 314 mem_init->size = 4; 315 mem_init->next = NULL; 316 tail->next = mem_init; 317 tail = mem_init; 318 init_header->size += qat_hal_get_ins_num(); 319 mem_val_attr++; 320 } 321 return 0; 322 } 323 324 static int 325 qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle, 326 struct icp_qat_uof_initmem *init_mem) 327 { 328 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 329 unsigned int ae; 330 unsigned int lmem; 331 332 lmem = IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev))) ? 333 ICP_QAT_UCLO_MAX_LMEM_REG_2X : 334 ICP_QAT_UCLO_MAX_LMEM_REG; 335 336 if (qat_uclo_fetch_initmem_ae(handle, init_mem, lmem, &ae)) 337 return EINVAL; 338 if (qat_uclo_create_batch_init_list( 339 handle, init_mem, ae, &obj_handle->lm_init_tab[ae])) 340 return EINVAL; 341 return 0; 342 } 343 344 static int 345 qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle, 346 struct icp_qat_uof_initmem *init_mem) 347 { 348 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 349 unsigned int ae, ustore_size, uaddr, i; 350 struct icp_qat_uclo_aedata *aed; 351 352 ustore_size = obj_handle->ustore_phy_size; 353 if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae)) 354 return EINVAL; 355 if (qat_uclo_create_batch_init_list( 356 handle, init_mem, ae, &obj_handle->umem_init_tab[ae])) 357 return EINVAL; 358 /* set the highest ustore address referenced */ 359 uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2; 360 aed = &obj_handle->ae_data[ae]; 361 for (i = 0; i < aed->slice_num; i++) { 362 if (aed->ae_slices[i].encap_image->uwords_num < uaddr) 363 aed->ae_slices[i].encap_image->uwords_num = uaddr; 364 } 365 return 0; 366 } 367 368 #define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000 369 static int 370 qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle, 371 struct icp_qat_uof_initmem *init_mem) 372 { 373 switch (init_mem->region) { 374 case ICP_QAT_UOF_LMEM_REGION: 375 if (qat_uclo_init_lmem_seg(handle, init_mem)) 376 return EINVAL; 377 break; 378 case ICP_QAT_UOF_UMEM_REGION: 379 if (qat_uclo_init_umem_seg(handle, init_mem)) 380 return EINVAL; 381 break; 382 default: 383 pr_err("QAT: initmem region error. region type=0x%x\n", 384 init_mem->region); 385 return EINVAL; 386 } 387 return 0; 388 } 389 390 static int 391 qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle, 392 struct icp_qat_uclo_encapme *image) 393 { 394 unsigned int i; 395 struct icp_qat_uclo_encap_page *page; 396 struct icp_qat_uof_image *uof_image; 397 unsigned char ae = 0; 398 unsigned char neigh_ae; 399 unsigned int ustore_size; 400 unsigned int patt_pos; 401 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 402 uint64_t *fill_data; 403 static unsigned int init[32] = { 0 }; 404 unsigned long ae_mask = handle->hal_handle->ae_mask; 405 406 uof_image = image->img_ptr; 407 /*if shared CS mode, the ustore size should be 2*ustore_phy_size*/ 408 fill_data = malloc(obj_handle->ustore_phy_size * 2 * sizeof(uint64_t), 409 M_QAT, 410 M_WAITOK | M_ZERO); 411 for (i = 0; i < obj_handle->ustore_phy_size * 2; i++) 412 memcpy(&fill_data[i], 413 &uof_image->fill_pattern, 414 sizeof(uint64_t)); 415 page = image->page; 416 417 for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) 418 { 419 unsigned long cfg_ae_mask = handle->cfg_ae_mask; 420 unsigned long ae_assigned = uof_image->ae_assigned; 421 const bool gen4 = 422 IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev))); 423 424 if (!test_bit(ae, &cfg_ae_mask)) 425 continue; 426 427 if (!test_bit(ae, &ae_assigned)) 428 continue; 429 430 if (obj_handle->ae_data[ae].shareable_ustore && (ae & 1) && 431 !gen4) { 432 qat_hal_get_scs_neigh_ae(ae, &neigh_ae); 433 434 if (test_bit(neigh_ae, &ae_assigned)) 435 continue; 436 } 437 438 ustore_size = obj_handle->ae_data[ae].eff_ustore_size; 439 patt_pos = page->beg_addr_p + page->micro_words_num; 440 if (obj_handle->ae_data[ae].shareable_ustore && !gen4) { 441 qat_hal_get_scs_neigh_ae(ae, &neigh_ae); 442 if (init[ae] == 0 && page->beg_addr_p != 0) { 443 qat_hal_wr_coalesce_uwords(handle, 444 (unsigned char)ae, 445 0, 446 page->beg_addr_p, 447 &fill_data[0]); 448 } 449 qat_hal_wr_coalesce_uwords( 450 handle, 451 (unsigned char)ae, 452 patt_pos, 453 ustore_size - patt_pos, 454 &fill_data[page->beg_addr_p]); 455 init[ae] = 1; 456 init[neigh_ae] = 1; 457 } else { 458 if (gen4 && (ae % 4 != 0)) 459 continue; 460 461 qat_hal_wr_uwords(handle, 462 (unsigned char)ae, 463 0, 464 page->beg_addr_p, 465 &fill_data[0]); 466 qat_hal_wr_uwords(handle, 467 (unsigned char)ae, 468 patt_pos, 469 ustore_size - patt_pos + 1, 470 &fill_data[page->beg_addr_p]); 471 } 472 } 473 free(fill_data, M_QAT); 474 return 0; 475 } 476 477 static int 478 qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle) 479 { 480 int i; 481 int ae = 0; 482 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 483 struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem; 484 unsigned long ae_mask = handle->hal_handle->ae_mask; 485 486 for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) { 487 if (initmem->num_in_bytes) { 488 if (qat_uclo_init_ae_memory(handle, initmem)) 489 return EINVAL; 490 } 491 initmem = 492 (struct icp_qat_uof_initmem 493 *)((uintptr_t)((uintptr_t)initmem + 494 sizeof(struct icp_qat_uof_initmem)) + 495 (sizeof(struct icp_qat_uof_memvar_attr) * 496 initmem->val_attr_num)); 497 } 498 499 for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) 500 { 501 if (qat_hal_batch_wr_lm(handle, 502 ae, 503 obj_handle->lm_init_tab[ae])) { 504 pr_err("QAT: fail to batch init lmem for AE %d\n", ae); 505 return EINVAL; 506 } 507 qat_uclo_cleanup_batch_init_list(handle, 508 &obj_handle->lm_init_tab[ae]); 509 qat_uclo_batch_wr_umem(handle, 510 ae, 511 obj_handle->umem_init_tab[ae]); 512 qat_uclo_cleanup_batch_init_list( 513 handle, &obj_handle->umem_init_tab[ae]); 514 } 515 return 0; 516 } 517 518 static void * 519 qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr, 520 char *chunk_id, 521 void *cur) 522 { 523 int i; 524 struct icp_qat_uof_chunkhdr *chunk_hdr = 525 (struct icp_qat_uof_chunkhdr *)((uintptr_t)obj_hdr + 526 sizeof(struct icp_qat_uof_objhdr)); 527 528 for (i = 0; i < obj_hdr->num_chunks; i++) { 529 if ((cur < (void *)&chunk_hdr[i]) && 530 !strncmp(chunk_hdr[i].chunk_id, 531 chunk_id, 532 ICP_QAT_UOF_OBJID_LEN)) { 533 return &chunk_hdr[i]; 534 } 535 } 536 return NULL; 537 } 538 539 static unsigned int 540 qat_uclo_calc_checksum(unsigned int reg, int ch) 541 { 542 int i; 543 unsigned int topbit = 1 << 0xF; 544 unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch); 545 546 reg ^= inbyte << 0x8; 547 for (i = 0; i < 0x8; i++) { 548 if (reg & topbit) 549 reg = (reg << 1) ^ 0x1021; 550 else 551 reg <<= 1; 552 } 553 return reg & 0xFFFF; 554 } 555 556 static unsigned int 557 qat_uclo_calc_str_checksum(const char *ptr, int num) 558 { 559 unsigned int chksum = 0; 560 561 if (ptr) 562 while (num--) 563 chksum = qat_uclo_calc_checksum(chksum, *ptr++); 564 return chksum; 565 } 566 567 static struct icp_qat_uclo_objhdr * 568 qat_uclo_map_chunk(char *buf, 569 struct icp_qat_uof_filehdr *file_hdr, 570 char *chunk_id) 571 { 572 struct icp_qat_uof_filechunkhdr *file_chunk; 573 struct icp_qat_uclo_objhdr *obj_hdr; 574 char *chunk; 575 int i; 576 577 file_chunk = (struct icp_qat_uof_filechunkhdr 578 *)(buf + sizeof(struct icp_qat_uof_filehdr)); 579 for (i = 0; i < file_hdr->num_chunks; i++) { 580 if (!strncmp(file_chunk->chunk_id, 581 chunk_id, 582 ICP_QAT_UOF_OBJID_LEN)) { 583 chunk = buf + file_chunk->offset; 584 if (file_chunk->checksum != 585 qat_uclo_calc_str_checksum(chunk, file_chunk->size)) 586 break; 587 obj_hdr = 588 malloc(sizeof(*obj_hdr), M_QAT, M_WAITOK | M_ZERO); 589 obj_hdr->file_buff = chunk; 590 obj_hdr->checksum = file_chunk->checksum; 591 obj_hdr->size = file_chunk->size; 592 return obj_hdr; 593 } 594 file_chunk++; 595 } 596 return NULL; 597 } 598 599 static unsigned int 600 qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj, 601 struct icp_qat_uof_image *image) 602 { 603 struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab; 604 struct icp_qat_uof_objtable *neigh_reg_tab; 605 struct icp_qat_uof_code_page *code_page; 606 607 code_page = 608 (struct icp_qat_uof_code_page *)((char *)image + 609 sizeof(struct icp_qat_uof_image)); 610 uc_var_tab = 611 (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof + 612 code_page->uc_var_tab_offset); 613 imp_var_tab = 614 (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof + 615 code_page->imp_var_tab_offset); 616 imp_expr_tab = 617 (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof + 618 code_page->imp_expr_tab_offset); 619 if (uc_var_tab->entry_num || imp_var_tab->entry_num || 620 imp_expr_tab->entry_num) { 621 pr_err("QAT: UOF can't contain imported variable to be parsed"); 622 return EINVAL; 623 } 624 neigh_reg_tab = 625 (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof + 626 code_page->neigh_reg_tab_offset); 627 if (neigh_reg_tab->entry_num) { 628 pr_err("QAT: UOF can't contain neighbor register table\n"); 629 return EINVAL; 630 } 631 if (image->numpages > 1) { 632 pr_err("QAT: UOF can't contain multiple pages\n"); 633 return EINVAL; 634 } 635 if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) { 636 pr_err("QAT: UOF can't use reloadable feature\n"); 637 return EFAULT; 638 } 639 return 0; 640 } 641 642 static void 643 qat_uclo_map_image_page(struct icp_qat_uof_encap_obj *encap_uof_obj, 644 struct icp_qat_uof_image *img, 645 struct icp_qat_uclo_encap_page *page) 646 { 647 struct icp_qat_uof_code_page *code_page; 648 struct icp_qat_uof_code_area *code_area; 649 struct icp_qat_uof_objtable *uword_block_tab; 650 struct icp_qat_uof_uword_block *uwblock; 651 int i; 652 653 code_page = 654 (struct icp_qat_uof_code_page *)((char *)img + 655 sizeof(struct icp_qat_uof_image)); 656 page->def_page = code_page->def_page; 657 page->page_region = code_page->page_region; 658 page->beg_addr_v = code_page->beg_addr_v; 659 page->beg_addr_p = code_page->beg_addr_p; 660 code_area = 661 (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof + 662 code_page->code_area_offset); 663 page->micro_words_num = code_area->micro_words_num; 664 uword_block_tab = 665 (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof + 666 code_area->uword_block_tab); 667 page->uwblock_num = uword_block_tab->entry_num; 668 uwblock = (struct icp_qat_uof_uword_block 669 *)((char *)uword_block_tab + 670 sizeof(struct icp_qat_uof_objtable)); 671 page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock; 672 for (i = 0; i < uword_block_tab->entry_num; i++) 673 page->uwblock[i].micro_words = 674 (uintptr_t)encap_uof_obj->beg_uof + uwblock[i].uword_offset; 675 } 676 677 static int 678 qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle, 679 struct icp_qat_uclo_encapme *ae_uimage, 680 int max_image) 681 { 682 int i, j; 683 struct icp_qat_uof_chunkhdr *chunk_hdr = NULL; 684 struct icp_qat_uof_image *image; 685 struct icp_qat_uof_objtable *ae_regtab; 686 struct icp_qat_uof_objtable *init_reg_sym_tab; 687 struct icp_qat_uof_objtable *sbreak_tab; 688 struct icp_qat_uof_encap_obj *encap_uof_obj = 689 &obj_handle->encap_uof_obj; 690 691 for (j = 0; j < max_image; j++) { 692 chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr, 693 ICP_QAT_UOF_IMAG, 694 chunk_hdr); 695 if (!chunk_hdr) 696 break; 697 image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof + 698 chunk_hdr->offset); 699 ae_regtab = 700 (struct icp_qat_uof_objtable *)(image->reg_tab_offset + 701 obj_handle->obj_hdr 702 ->file_buff); 703 ae_uimage[j].ae_reg_num = ae_regtab->entry_num; 704 ae_uimage[j].ae_reg = 705 (struct icp_qat_uof_ae_reg 706 *)(((char *)ae_regtab) + 707 sizeof(struct icp_qat_uof_objtable)); 708 init_reg_sym_tab = 709 (struct icp_qat_uof_objtable *)(image->init_reg_sym_tab + 710 obj_handle->obj_hdr 711 ->file_buff); 712 ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num; 713 ae_uimage[j].init_regsym = 714 (struct icp_qat_uof_init_regsym 715 *)(((char *)init_reg_sym_tab) + 716 sizeof(struct icp_qat_uof_objtable)); 717 sbreak_tab = (struct icp_qat_uof_objtable *)(image->sbreak_tab + 718 obj_handle->obj_hdr 719 ->file_buff); 720 ae_uimage[j].sbreak_num = sbreak_tab->entry_num; 721 ae_uimage[j].sbreak = 722 (struct icp_qat_uof_sbreak 723 *)(((char *)sbreak_tab) + 724 sizeof(struct icp_qat_uof_objtable)); 725 ae_uimage[j].img_ptr = image; 726 if (qat_uclo_check_image_compat(encap_uof_obj, image)) 727 goto out_err; 728 ae_uimage[j].page = 729 malloc(sizeof(struct icp_qat_uclo_encap_page), 730 M_QAT, 731 M_WAITOK | M_ZERO); 732 qat_uclo_map_image_page(encap_uof_obj, 733 image, 734 ae_uimage[j].page); 735 } 736 return j; 737 out_err: 738 for (i = 0; i < j; i++) 739 free(ae_uimage[i].page, M_QAT); 740 return 0; 741 } 742 743 static int 744 UcLo_checkTGroupList2X(struct icp_qat_fw_loader_handle *handle) 745 { 746 int i; 747 unsigned int swAe = 0; 748 unsigned int ii, jj; 749 struct icp_qat_uclo_aedata *ae_data0, *ae_datax; 750 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 751 752 for (i = 0; i < obj_handle->uimage_num; i++) { 753 struct icp_qat_uof_image *image = 754 obj_handle->ae_uimage[i].img_ptr; 755 if (image->numpages > 1) { 756 pr_err( 757 "Only 1 page is allowed in a UOF for CPM2X; We found %d in %s\n", 758 image->numpages, 759 qat_uclo_get_string(&obj_handle->str_table, 760 image->img_name)); 761 return EINVAL; 762 } 763 } 764 765 for (swAe = 0; 766 (swAe < obj_handle->ae_num) && (swAe < ICP_QAT_UCLO_MAX_AE); 767 swAe += AE_TG_NUM_CPM2X) { 768 if (!qat_hal_check_ae_active(handle, swAe)) { 769 continue; 770 } 771 772 for (ii = swAe; ii < (swAe + AE_TG_NUM_CPM2X); ii++) { 773 ae_data0 = &obj_handle->ae_data[ii]; 774 if (ae_data0->slice_num != 1) // not assigned 775 continue; 776 777 for (jj = ii + 1; jj < (swAe + AE_TG_NUM_CPM2X); jj++) { 778 ae_datax = &obj_handle->ae_data[jj]; 779 if (ae_datax->slice_num != 1) // not assigned 780 continue; 781 if (ae_data0->ae_slices[0] 782 .encap_image->img_ptr != 783 ae_datax->ae_slices[0] 784 .encap_image->img_ptr) { 785 pr_err("Only 1 list is allowed in a "); 786 pr_err("Tgroup for CPM2X;\n"); 787 pr_err("ME%d, %d is assigned", ii, jj); 788 pr_err(" different list files\n"); 789 return EINVAL; 790 } 791 } 792 } 793 } 794 795 return 0; 796 } 797 798 static int 799 qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae) 800 { 801 int i; 802 int ae = 0; 803 unsigned long ae_mask = handle->hal_handle->ae_mask; 804 unsigned long cfg_ae_mask = handle->cfg_ae_mask; 805 int mflag = 0; 806 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 807 808 for_each_set_bit(ae, &ae_mask, max_ae) 809 { 810 if (!test_bit(ae, &cfg_ae_mask)) 811 continue; 812 813 for (i = 0; i < obj_handle->uimage_num; i++) { 814 unsigned long ae_assigned = 815 obj_handle->ae_uimage[i].img_ptr->ae_assigned; 816 if (!test_bit(ae, &ae_assigned)) 817 continue; 818 mflag = 1; 819 if (qat_uclo_init_ae_data(obj_handle, ae, i)) 820 return EINVAL; 821 } 822 } 823 if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { 824 if (UcLo_checkTGroupList2X(handle)) { 825 return EINVAL; 826 } 827 } 828 if (!mflag) { 829 pr_err("QAT: uimage uses AE not set"); 830 return EINVAL; 831 } 832 return 0; 833 } 834 835 static struct icp_qat_uof_strtable * 836 qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr, 837 char *tab_name, 838 struct icp_qat_uof_strtable *str_table) 839 { 840 struct icp_qat_uof_chunkhdr *chunk_hdr; 841 842 chunk_hdr = 843 qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)obj_hdr->file_buff, 844 tab_name, 845 NULL); 846 if (chunk_hdr) { 847 int hdr_size; 848 849 memcpy(&str_table->table_len, 850 obj_hdr->file_buff + chunk_hdr->offset, 851 sizeof(str_table->table_len)); 852 hdr_size = (char *)&str_table->strings - (char *)str_table; 853 str_table->strings = (uintptr_t)obj_hdr->file_buff + 854 chunk_hdr->offset + hdr_size; 855 return str_table; 856 } 857 return NULL; 858 } 859 860 static void 861 qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj, 862 struct icp_qat_uclo_init_mem_table *init_mem_tab) 863 { 864 struct icp_qat_uof_chunkhdr *chunk_hdr; 865 866 chunk_hdr = 867 qat_uclo_find_chunk(encap_uof_obj->obj_hdr, ICP_QAT_UOF_IMEM, NULL); 868 if (chunk_hdr) { 869 memmove(&init_mem_tab->entry_num, 870 encap_uof_obj->beg_uof + chunk_hdr->offset, 871 sizeof(unsigned int)); 872 init_mem_tab->init_mem = 873 (struct icp_qat_uof_initmem *)(encap_uof_obj->beg_uof + 874 chunk_hdr->offset + 875 sizeof(unsigned int)); 876 } 877 } 878 879 static unsigned int 880 qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle) 881 { 882 switch (pci_get_device(GET_DEV(handle->accel_dev))) { 883 case ADF_DH895XCC_PCI_DEVICE_ID: 884 return ICP_QAT_AC_895XCC_DEV_TYPE; 885 case ADF_C62X_PCI_DEVICE_ID: 886 return ICP_QAT_AC_C62X_DEV_TYPE; 887 case ADF_C3XXX_PCI_DEVICE_ID: 888 return ICP_QAT_AC_C3XXX_DEV_TYPE; 889 case ADF_200XX_PCI_DEVICE_ID: 890 return ICP_QAT_AC_200XX_DEV_TYPE; 891 case ADF_C4XXX_PCI_DEVICE_ID: 892 return ICP_QAT_AC_C4XXX_DEV_TYPE; 893 case ADF_4XXX_PCI_DEVICE_ID: 894 case ADF_401XX_PCI_DEVICE_ID: 895 case ADF_402XX_PCI_DEVICE_ID: 896 return ICP_QAT_AC_4XXX_A_DEV_TYPE; 897 default: 898 pr_err("QAT: unsupported device 0x%x\n", 899 pci_get_device(GET_DEV(handle->accel_dev))); 900 return 0; 901 } 902 } 903 904 static int 905 qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle) 906 { 907 unsigned int maj_ver, prod_type = obj_handle->prod_type; 908 909 if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) { 910 pr_err("QAT: UOF type 0x%x doesn't match with platform 0x%x\n", 911 obj_handle->encap_uof_obj.obj_hdr->ac_dev_type, 912 prod_type); 913 return EINVAL; 914 } 915 maj_ver = obj_handle->prod_rev & 0xff; 916 if (obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver || 917 obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver) { 918 pr_err("QAT: UOF maj_ver 0x%x out of range\n", maj_ver); 919 return EINVAL; 920 } 921 return 0; 922 } 923 924 static int 925 qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle, 926 unsigned char ae, 927 unsigned char ctx_mask, 928 enum icp_qat_uof_regtype reg_type, 929 unsigned short reg_addr, 930 unsigned int value) 931 { 932 switch (reg_type) { 933 case ICP_GPA_ABS: 934 case ICP_GPB_ABS: 935 ctx_mask = 0; 936 return qat_hal_init_gpr( 937 handle, ae, ctx_mask, reg_type, reg_addr, value); 938 case ICP_GPA_REL: 939 case ICP_GPB_REL: 940 return qat_hal_init_gpr( 941 handle, ae, ctx_mask, reg_type, reg_addr, value); 942 case ICP_SR_ABS: 943 case ICP_DR_ABS: 944 case ICP_SR_RD_ABS: 945 case ICP_DR_RD_ABS: 946 ctx_mask = 0; 947 return qat_hal_init_rd_xfer( 948 handle, ae, ctx_mask, reg_type, reg_addr, value); 949 case ICP_SR_REL: 950 case ICP_DR_REL: 951 case ICP_SR_RD_REL: 952 case ICP_DR_RD_REL: 953 return qat_hal_init_rd_xfer( 954 handle, ae, ctx_mask, reg_type, reg_addr, value); 955 case ICP_SR_WR_ABS: 956 case ICP_DR_WR_ABS: 957 ctx_mask = 0; 958 return qat_hal_init_wr_xfer( 959 handle, ae, ctx_mask, reg_type, reg_addr, value); 960 case ICP_SR_WR_REL: 961 case ICP_DR_WR_REL: 962 return qat_hal_init_wr_xfer( 963 handle, ae, ctx_mask, reg_type, reg_addr, value); 964 case ICP_NEIGH_REL: 965 return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value); 966 default: 967 pr_err("QAT: UOF uses unsupported reg type 0x%x\n", reg_type); 968 return EFAULT; 969 } 970 return 0; 971 } 972 973 static int 974 qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle, 975 unsigned int ae, 976 struct icp_qat_uclo_encapme *encap_ae) 977 { 978 unsigned int i; 979 unsigned char ctx_mask; 980 struct icp_qat_uof_init_regsym *init_regsym; 981 982 if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) == 983 ICP_QAT_UCLO_MAX_CTX) 984 ctx_mask = 0xff; 985 else 986 ctx_mask = 0x55; 987 988 for (i = 0; i < encap_ae->init_regsym_num; i++) { 989 unsigned int exp_res; 990 991 init_regsym = &encap_ae->init_regsym[i]; 992 exp_res = init_regsym->value; 993 switch (init_regsym->init_type) { 994 case ICP_QAT_UOF_INIT_REG: 995 qat_uclo_init_reg(handle, 996 ae, 997 ctx_mask, 998 (enum icp_qat_uof_regtype) 999 init_regsym->reg_type, 1000 (unsigned short)init_regsym->reg_addr, 1001 exp_res); 1002 break; 1003 case ICP_QAT_UOF_INIT_REG_CTX: 1004 /* check if ctx is appropriate for the ctxMode */ 1005 if (!((1 << init_regsym->ctx) & ctx_mask)) { 1006 pr_err("QAT: invalid ctx num = 0x%x\n", 1007 init_regsym->ctx); 1008 return EINVAL; 1009 } 1010 qat_uclo_init_reg( 1011 handle, 1012 ae, 1013 (unsigned char)(1 << init_regsym->ctx), 1014 (enum icp_qat_uof_regtype)init_regsym->reg_type, 1015 (unsigned short)init_regsym->reg_addr, 1016 exp_res); 1017 break; 1018 case ICP_QAT_UOF_INIT_EXPR: 1019 pr_err("QAT: INIT_EXPR feature not supported\n"); 1020 return EINVAL; 1021 case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP: 1022 pr_err("QAT: INIT_EXPR_ENDIAN_SWAP not supported\n"); 1023 return EINVAL; 1024 default: 1025 break; 1026 } 1027 } 1028 return 0; 1029 } 1030 1031 static int 1032 qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle) 1033 { 1034 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 1035 unsigned int s; 1036 unsigned int ae = 0; 1037 struct icp_qat_uclo_aedata *aed; 1038 unsigned long ae_mask = handle->hal_handle->ae_mask; 1039 1040 if (obj_handle->global_inited) 1041 return 0; 1042 if (obj_handle->init_mem_tab.entry_num) { 1043 if (qat_uclo_init_memory(handle)) { 1044 pr_err("QAT: initialize memory failed\n"); 1045 return EINVAL; 1046 } 1047 } 1048 1049 for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) 1050 { 1051 aed = &obj_handle->ae_data[ae]; 1052 for (s = 0; s < aed->slice_num; s++) { 1053 if (!aed->ae_slices[s].encap_image) 1054 continue; 1055 if (qat_uclo_init_reg_sym( 1056 handle, ae, aed->ae_slices[s].encap_image)) 1057 return EINVAL; 1058 } 1059 } 1060 obj_handle->global_inited = 1; 1061 return 0; 1062 } 1063 1064 static int 1065 qat_hal_set_modes(struct icp_qat_fw_loader_handle *handle, 1066 struct icp_qat_uclo_objhandle *obj_handle, 1067 unsigned char ae, 1068 struct icp_qat_uof_image *uof_image) 1069 { 1070 unsigned char nn_mode; 1071 char ae_mode = 0; 1072 1073 ae_mode = (char)ICP_QAT_CTX_MODE(uof_image->ae_mode); 1074 if (qat_hal_set_ae_ctx_mode(handle, ae, ae_mode)) { 1075 pr_err("QAT: qat_hal_set_ae_ctx_mode error\n"); 1076 return EFAULT; 1077 } 1078 1079 ae_mode = (char)ICP_QAT_SHARED_USTORE_MODE(uof_image->ae_mode); 1080 qat_hal_set_ae_scs_mode(handle, ae, ae_mode); 1081 if (!IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { 1082 nn_mode = ICP_QAT_NN_MODE(uof_image->ae_mode); 1083 1084 if (qat_hal_set_ae_nn_mode(handle, ae, nn_mode)) { 1085 pr_err("QAT: qat_hal_set_ae_nn_mode error\n"); 1086 return EFAULT; 1087 } 1088 } 1089 ae_mode = (char)ICP_QAT_LOC_MEM0_MODE(uof_image->ae_mode); 1090 if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0, ae_mode)) { 1091 pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n"); 1092 return EFAULT; 1093 } 1094 ae_mode = (char)ICP_QAT_LOC_MEM1_MODE(uof_image->ae_mode); 1095 if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1, ae_mode)) { 1096 pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n"); 1097 return EFAULT; 1098 } 1099 if (IS_QAT_GEN3_OR_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { 1100 ae_mode = (char)ICP_QAT_LOC_MEM2_MODE(uof_image->ae_mode); 1101 if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM2, ae_mode)) { 1102 pr_err("QAT: qat_hal_set_ae_lm_mode LMEM2 error\n"); 1103 return EFAULT; 1104 } 1105 ae_mode = (char)ICP_QAT_LOC_MEM3_MODE(uof_image->ae_mode); 1106 if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM3, ae_mode)) { 1107 pr_err("QAT: qat_hal_set_ae_lm_mode LMEM3 error\n"); 1108 return EFAULT; 1109 } 1110 ae_mode = (char)ICP_QAT_LOC_TINDEX_MODE(uof_image->ae_mode); 1111 qat_hal_set_ae_tindex_mode(handle, ae, ae_mode); 1112 } 1113 return 0; 1114 } 1115 1116 static int 1117 qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle) 1118 { 1119 int error; 1120 unsigned char s; 1121 unsigned char ae = 0; 1122 struct icp_qat_uof_image *uof_image; 1123 struct icp_qat_uclo_aedata *ae_data; 1124 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 1125 unsigned long ae_mask = handle->hal_handle->ae_mask; 1126 1127 for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) 1128 { 1129 unsigned long cfg_ae_mask = handle->cfg_ae_mask; 1130 1131 if (!test_bit(ae, &cfg_ae_mask)) 1132 continue; 1133 1134 ae_data = &obj_handle->ae_data[ae]; 1135 for (s = 0; s < min_t(unsigned int, 1136 ae_data->slice_num, 1137 ICP_QAT_UCLO_MAX_CTX); 1138 s++) { 1139 if (!obj_handle->ae_data[ae].ae_slices[s].encap_image) 1140 continue; 1141 uof_image = ae_data->ae_slices[s].encap_image->img_ptr; 1142 error = qat_hal_set_modes(handle, 1143 obj_handle, 1144 ae, 1145 uof_image); 1146 if (error) 1147 return error; 1148 } 1149 } 1150 return 0; 1151 } 1152 1153 static void 1154 qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle) 1155 { 1156 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 1157 struct icp_qat_uclo_encapme *image; 1158 int a; 1159 1160 for (a = 0; a < obj_handle->uimage_num; a++) { 1161 image = &obj_handle->ae_uimage[a]; 1162 image->uwords_num = 1163 image->page->beg_addr_p + image->page->micro_words_num; 1164 } 1165 } 1166 1167 static int 1168 qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle) 1169 { 1170 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 1171 unsigned int ae; 1172 1173 obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff; 1174 obj_handle->encap_uof_obj.obj_hdr = 1175 (struct icp_qat_uof_objhdr *)obj_handle->obj_hdr->file_buff; 1176 obj_handle->uword_in_bytes = 6; 1177 obj_handle->prod_type = qat_uclo_get_dev_type(handle); 1178 obj_handle->prod_rev = 1179 PID_MAJOR_REV | (PID_MINOR_REV & handle->hal_handle->revision_id); 1180 if (qat_uclo_check_uof_compat(obj_handle)) { 1181 pr_err("QAT: UOF incompatible\n"); 1182 return EINVAL; 1183 } 1184 obj_handle->uword_buf = malloc(UWORD_CPYBUF_SIZE * sizeof(uint64_t), 1185 M_QAT, 1186 M_WAITOK | M_ZERO); 1187 obj_handle->ustore_phy_size = 1188 (obj_handle->prod_type == ICP_QAT_AC_C4XXX_DEV_TYPE) ? 0x2000 : 1189 0x4000; 1190 if (!obj_handle->obj_hdr->file_buff || 1191 !qat_uclo_map_str_table(obj_handle->obj_hdr, 1192 ICP_QAT_UOF_STRT, 1193 &obj_handle->str_table)) { 1194 pr_err("QAT: UOF doesn't have effective images\n"); 1195 goto out_err; 1196 } 1197 obj_handle->uimage_num = 1198 qat_uclo_map_uimage(obj_handle, 1199 obj_handle->ae_uimage, 1200 ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX); 1201 if (!obj_handle->uimage_num) 1202 goto out_err; 1203 if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) { 1204 pr_err("QAT: Bad object\n"); 1205 goto out_check_uof_aemask_err; 1206 } 1207 qat_uclo_init_uword_num(handle); 1208 qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj, 1209 &obj_handle->init_mem_tab); 1210 if (qat_uclo_set_ae_mode(handle)) 1211 goto out_check_uof_aemask_err; 1212 return 0; 1213 out_check_uof_aemask_err: 1214 for (ae = 0; ae < obj_handle->uimage_num; ae++) 1215 free(obj_handle->ae_uimage[ae].page, M_QAT); 1216 out_err: 1217 free(obj_handle->uword_buf, M_QAT); 1218 obj_handle->uword_buf = NULL; 1219 return EFAULT; 1220 } 1221 1222 static int 1223 qat_uclo_map_suof_file_hdr(const struct icp_qat_fw_loader_handle *handle, 1224 const struct icp_qat_suof_filehdr *suof_ptr, 1225 int suof_size) 1226 { 1227 unsigned int check_sum = 0; 1228 unsigned int min_ver_offset = 0; 1229 struct icp_qat_suof_handle *suof_handle = handle->sobj_handle; 1230 1231 suof_handle->file_id = ICP_QAT_SUOF_FID; 1232 suof_handle->suof_buf = (const char *)suof_ptr; 1233 suof_handle->suof_size = suof_size; 1234 min_ver_offset = 1235 suof_size - offsetof(struct icp_qat_suof_filehdr, min_ver); 1236 check_sum = qat_uclo_calc_str_checksum((const char *)&suof_ptr->min_ver, 1237 min_ver_offset); 1238 if (check_sum != suof_ptr->check_sum) { 1239 pr_err("QAT: incorrect SUOF checksum\n"); 1240 return EINVAL; 1241 } 1242 suof_handle->check_sum = suof_ptr->check_sum; 1243 suof_handle->min_ver = suof_ptr->min_ver; 1244 suof_handle->maj_ver = suof_ptr->maj_ver; 1245 suof_handle->fw_type = suof_ptr->fw_type; 1246 return 0; 1247 } 1248 1249 static void 1250 qat_uclo_map_simg(struct icp_qat_fw_loader_handle *handle, 1251 struct icp_qat_suof_img_hdr *suof_img_hdr, 1252 struct icp_qat_suof_chunk_hdr *suof_chunk_hdr) 1253 { 1254 struct icp_qat_suof_handle *suof_handle = handle->sobj_handle; 1255 const struct icp_qat_simg_ae_mode *ae_mode; 1256 struct icp_qat_suof_objhdr *suof_objhdr; 1257 unsigned int device_id = pci_get_device(GET_DEV(handle->accel_dev)); 1258 1259 suof_img_hdr->simg_buf = 1260 (suof_handle->suof_buf + suof_chunk_hdr->offset + 1261 sizeof(*suof_objhdr)); 1262 suof_img_hdr->simg_len = 1263 ((struct icp_qat_suof_objhdr *)(uintptr_t)(suof_handle->suof_buf + 1264 suof_chunk_hdr->offset)) 1265 ->img_length; 1266 1267 suof_img_hdr->css_header = suof_img_hdr->simg_buf; 1268 suof_img_hdr->css_key = 1269 (suof_img_hdr->css_header + sizeof(struct icp_qat_css_hdr)); 1270 suof_img_hdr->css_signature = suof_img_hdr->css_key + 1271 ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id) + 1272 ICP_QAT_CSS_FWSK_EXPONENT_LEN(device_id); 1273 suof_img_hdr->css_simg = 1274 suof_img_hdr->css_signature + ICP_QAT_CSS_SIGNATURE_LEN(device_id); 1275 1276 ae_mode = (const struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg); 1277 suof_img_hdr->ae_mask = ae_mode->ae_mask; 1278 suof_img_hdr->simg_name = (unsigned long)&ae_mode->simg_name; 1279 suof_img_hdr->appmeta_data = (unsigned long)&ae_mode->appmeta_data; 1280 suof_img_hdr->fw_type = ae_mode->fw_type; 1281 } 1282 1283 static void 1284 qat_uclo_map_suof_symobjs(struct icp_qat_suof_handle *suof_handle, 1285 struct icp_qat_suof_chunk_hdr *suof_chunk_hdr) 1286 { 1287 char **sym_str = (char **)&suof_handle->sym_str; 1288 unsigned int *sym_size = &suof_handle->sym_size; 1289 struct icp_qat_suof_strtable *str_table_obj; 1290 1291 *sym_size = *(unsigned int *)(uintptr_t)(suof_chunk_hdr->offset + 1292 suof_handle->suof_buf); 1293 *sym_str = 1294 (char *)(uintptr_t)(suof_handle->suof_buf + suof_chunk_hdr->offset + 1295 sizeof(str_table_obj->tab_length)); 1296 } 1297 1298 static int 1299 qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle, 1300 struct icp_qat_suof_img_hdr *img_hdr) 1301 { 1302 const struct icp_qat_simg_ae_mode *img_ae_mode = NULL; 1303 unsigned int prod_rev, maj_ver, prod_type; 1304 1305 prod_type = qat_uclo_get_dev_type(handle); 1306 img_ae_mode = (const struct icp_qat_simg_ae_mode *)img_hdr->css_simg; 1307 prod_rev = 1308 PID_MAJOR_REV | (PID_MINOR_REV & handle->hal_handle->revision_id); 1309 if (img_ae_mode->dev_type != prod_type) { 1310 pr_err("QAT: incompatible product type %x\n", 1311 img_ae_mode->dev_type); 1312 return EINVAL; 1313 } 1314 maj_ver = prod_rev & 0xff; 1315 if (maj_ver > img_ae_mode->devmax_ver || 1316 maj_ver < img_ae_mode->devmin_ver) { 1317 pr_err("QAT: incompatible device maj_ver 0x%x\n", maj_ver); 1318 return EINVAL; 1319 } 1320 return 0; 1321 } 1322 1323 static void 1324 qat_uclo_del_suof(struct icp_qat_fw_loader_handle *handle) 1325 { 1326 struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle; 1327 1328 free(sobj_handle->img_table.simg_hdr, M_QAT); 1329 sobj_handle->img_table.simg_hdr = NULL; 1330 free(handle->sobj_handle, M_QAT); 1331 handle->sobj_handle = NULL; 1332 } 1333 1334 static void 1335 qat_uclo_tail_img(struct icp_qat_suof_img_hdr *suof_img_hdr, 1336 unsigned int img_id, 1337 unsigned int num_simgs) 1338 { 1339 struct icp_qat_suof_img_hdr img_header; 1340 1341 if ((img_id != num_simgs - 1) && img_id != ICP_QAT_UCLO_MAX_AE) { 1342 memcpy(&img_header, 1343 &suof_img_hdr[num_simgs - 1], 1344 sizeof(*suof_img_hdr)); 1345 memcpy(&suof_img_hdr[num_simgs - 1], 1346 &suof_img_hdr[img_id], 1347 sizeof(*suof_img_hdr)); 1348 memcpy(&suof_img_hdr[img_id], 1349 &img_header, 1350 sizeof(*suof_img_hdr)); 1351 } 1352 } 1353 1354 static int 1355 qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle, 1356 const struct icp_qat_suof_filehdr *suof_ptr, 1357 int suof_size) 1358 { 1359 struct icp_qat_suof_handle *suof_handle = handle->sobj_handle; 1360 struct icp_qat_suof_chunk_hdr *suof_chunk_hdr = NULL; 1361 struct icp_qat_suof_img_hdr *suof_img_hdr = NULL; 1362 int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE, 1363 aeMax_img = ICP_QAT_UCLO_MAX_AE; 1364 unsigned int i = 0; 1365 struct icp_qat_suof_img_hdr img_header; 1366 1367 if (!suof_ptr || suof_size == 0) { 1368 pr_err("QAT: input parameter SUOF pointer/size is NULL\n"); 1369 return EINVAL; 1370 } 1371 if (qat_uclo_check_suof_format(suof_ptr)) 1372 return EINVAL; 1373 ret = qat_uclo_map_suof_file_hdr(handle, suof_ptr, suof_size); 1374 if (ret) 1375 return ret; 1376 suof_chunk_hdr = (struct icp_qat_suof_chunk_hdr *)((uintptr_t)suof_ptr + 1377 sizeof(*suof_ptr)); 1378 1379 qat_uclo_map_suof_symobjs(suof_handle, suof_chunk_hdr); 1380 suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1; 1381 1382 if (suof_handle->img_table.num_simgs != 0) { 1383 suof_img_hdr = malloc(suof_handle->img_table.num_simgs * 1384 sizeof(img_header), 1385 M_QAT, 1386 M_WAITOK | M_ZERO); 1387 suof_handle->img_table.simg_hdr = suof_img_hdr; 1388 } 1389 1390 for (i = 0; i < suof_handle->img_table.num_simgs; i++) { 1391 qat_uclo_map_simg(handle, 1392 &suof_img_hdr[i], 1393 &suof_chunk_hdr[1 + i]); 1394 ret = qat_uclo_check_simg_compat(handle, &suof_img_hdr[i]); 1395 if (ret) 1396 return ret; 1397 suof_img_hdr[i].ae_mask &= handle->cfg_ae_mask; 1398 if ((suof_img_hdr[i].ae_mask & 0x1) != 0) 1399 ae0_img = i; 1400 } 1401 1402 if (!IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { 1403 qat_uclo_tail_img(suof_img_hdr, 1404 ae0_img, 1405 suof_handle->img_table.num_simgs); 1406 } else { 1407 if (suof_handle->img_table.num_simgs == 1) 1408 return 0; 1409 qat_uclo_tail_img(suof_img_hdr, 1410 ae0_img, 1411 suof_handle->img_table.num_simgs - 1); 1412 for (i = 0; i < suof_handle->img_table.num_simgs; i++) { 1413 if ((suof_img_hdr[i].ae_mask & 1414 (0x1 << (handle->hal_handle->ae_max_num - 1))) != 1415 0) { 1416 aeMax_img = i; 1417 break; 1418 } 1419 } 1420 qat_uclo_tail_img(suof_img_hdr, 1421 aeMax_img, 1422 suof_handle->img_table.num_simgs); 1423 } 1424 return 0; 1425 } 1426 1427 #define ADD_ADDR(high, low) ((((uint64_t)high) << 32) + (low)) 1428 #define BITS_IN_DWORD 32 1429 1430 static int 1431 qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle, 1432 struct icp_qat_fw_auth_desc *desc) 1433 { 1434 unsigned int fcu_sts, mem_cfg_err, retry = 0; 1435 unsigned int fcu_ctl_csr, fcu_sts_csr; 1436 unsigned int fcu_dram_hi_csr, fcu_dram_lo_csr; 1437 u64 bus_addr; 1438 1439 bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low) - 1440 sizeof(struct icp_qat_auth_chunk); 1441 if (IS_QAT_GEN3_OR_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { 1442 fcu_ctl_csr = FCU_CONTROL_C4XXX; 1443 fcu_sts_csr = FCU_STATUS_C4XXX; 1444 fcu_dram_hi_csr = FCU_DRAM_ADDR_HI_C4XXX; 1445 fcu_dram_lo_csr = FCU_DRAM_ADDR_LO_C4XXX; 1446 } else { 1447 fcu_ctl_csr = FCU_CONTROL; 1448 fcu_sts_csr = FCU_STATUS; 1449 fcu_dram_hi_csr = FCU_DRAM_ADDR_HI; 1450 fcu_dram_lo_csr = FCU_DRAM_ADDR_LO; 1451 } 1452 SET_FCU_CSR(handle, fcu_dram_hi_csr, (bus_addr >> BITS_IN_DWORD)); 1453 SET_FCU_CSR(handle, fcu_dram_lo_csr, bus_addr); 1454 SET_FCU_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_AUTH); 1455 1456 do { 1457 pause_ms("adfstop", FW_AUTH_WAIT_PERIOD); 1458 fcu_sts = GET_FCU_CSR(handle, fcu_sts_csr); 1459 if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL) 1460 goto auth_fail; 1461 if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1)) 1462 if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE) 1463 return 0; 1464 } while (retry++ < FW_AUTH_MAX_RETRY); 1465 auth_fail: 1466 pr_err("QAT: authentication error (FCU_STATUS = 0x%x),retry = %d\n", 1467 fcu_sts & FCU_AUTH_STS_MASK, 1468 retry); 1469 if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) { 1470 mem_cfg_err = 1471 (GET_FCU_CSR(handle, FCU_STATUS1_C4XXX) & MEM_CFG_ERR_BIT); 1472 if (mem_cfg_err) 1473 pr_err("QAT: MEM_CFG_ERR\n"); 1474 } 1475 return EINVAL; 1476 } 1477 1478 static int 1479 qat_uclo_is_broadcast(struct icp_qat_fw_loader_handle *handle, int imgid) 1480 { 1481 struct icp_qat_suof_handle *sobj_handle; 1482 1483 if (!IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) 1484 return 0; 1485 1486 sobj_handle = (struct icp_qat_suof_handle *)handle->sobj_handle; 1487 if (handle->hal_handle->admin_ae_mask & 1488 sobj_handle->img_table.simg_hdr[imgid].ae_mask) 1489 return 0; 1490 1491 return 1; 1492 } 1493 1494 static int 1495 qat_uclo_broadcast_load_fw(struct icp_qat_fw_loader_handle *handle, 1496 struct icp_qat_fw_auth_desc *desc) 1497 { 1498 unsigned int i = 0; 1499 unsigned int fcuSts = 0, fcuAeBroadcastMask = 0; 1500 unsigned int retry = 0; 1501 unsigned int fcuStsCsr = 0; 1502 unsigned int fcuCtlCsr = 0; 1503 unsigned int loadedAes = 0; 1504 unsigned int device_id = pci_get_device(GET_DEV(handle->accel_dev)); 1505 1506 if (IS_QAT_GEN4(device_id)) { 1507 fcuCtlCsr = FCU_CONTROL_4XXX; 1508 fcuStsCsr = FCU_STATUS_4XXX; 1509 } else { 1510 pr_err("Uclo_BroadcastLoadFW only applicable for CPM20\n"); 1511 return EINVAL; 1512 } 1513 1514 for (i = 0; i < ICP_QAT_UCLO_MAX_AE; i++) { 1515 if (!test_bit(i, (unsigned long *)&handle->hal_handle->ae_mask)) 1516 continue; 1517 1518 if (qat_hal_check_ae_active(handle, (unsigned char)i)) { 1519 pr_err( 1520 "Uclo_BroadcastLoadFW error (invalid AE status)\n"); 1521 return EINVAL; 1522 } 1523 1524 if ((desc->ae_mask >> i) & 0x1) { 1525 fcuAeBroadcastMask |= 1 << i; 1526 } 1527 } 1528 1529 if (fcuAeBroadcastMask) { 1530 retry = 0; 1531 SET_FCU_CSR(handle, 1532 FCU_ME_BROADCAST_MASK_TYPE, 1533 fcuAeBroadcastMask); 1534 SET_FCU_CSR(handle, fcuCtlCsr, FCU_CTRL_CMD_LOAD); 1535 do { 1536 msleep(FW_AUTH_WAIT_PERIOD); 1537 fcuSts = GET_FCU_CSR(handle, fcuStsCsr); 1538 1539 if ((fcuSts & FCU_AUTH_STS_MASK) == FCU_STS_LOAD_FAIL) { 1540 pr_err( 1541 "Uclo_BroadcastLoadFW fail (fcu_status = 0x%x)\n", 1542 fcuSts & FCU_AUTH_STS_MASK); 1543 return EINVAL; 1544 } else if ((fcuSts & FCU_AUTH_STS_MASK) == 1545 FCU_STS_LOAD_DONE) { 1546 if (IS_QAT_GEN4(device_id)) 1547 loadedAes = 1548 GET_FCU_CSR(handle, 1549 FCU_AE_LOADED_4XXX); 1550 else 1551 loadedAes = 1552 (fcuSts >> FCU_LOADED_AE_POS); 1553 1554 if ((loadedAes & fcuAeBroadcastMask) == 1555 fcuAeBroadcastMask) 1556 break; 1557 } else if ((fcuSts & FCU_AUTH_STS_MASK) == 1558 FCU_STS_VERI_DONE) { 1559 SET_FCU_CSR(handle, 1560 fcuCtlCsr, 1561 FCU_CTRL_CMD_LOAD); 1562 } 1563 } while (retry++ < FW_BROADCAST_MAX_RETRY); 1564 if (retry > FW_BROADCAST_MAX_RETRY) { 1565 pr_err( 1566 "Uclo_BroadcastLoadFW fail(fcu_status = 0x%x),retry = %d\n", 1567 fcuSts & FCU_AUTH_STS_MASK, 1568 retry); 1569 return EINVAL; 1570 } 1571 } 1572 return 0; 1573 } 1574 1575 static int 1576 qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle *handle, 1577 struct icp_firml_dram_desc *dram_desc, 1578 unsigned int size) 1579 { 1580 int ret; 1581 1582 ret = bus_dma_mem_create(&dram_desc->dram_mem, 1583 handle->accel_dev->dma_tag, 1584 1, 1585 BUS_SPACE_MAXADDR, 1586 size, 1587 0); 1588 if (ret != 0) 1589 return ret; 1590 dram_desc->dram_base_addr_v = dram_desc->dram_mem.dma_vaddr; 1591 dram_desc->dram_bus_addr = dram_desc->dram_mem.dma_baddr; 1592 dram_desc->dram_size = size; 1593 return 0; 1594 } 1595 1596 static void 1597 qat_uclo_simg_free(struct icp_qat_fw_loader_handle *handle, 1598 struct icp_firml_dram_desc *dram_desc) 1599 { 1600 if (handle && dram_desc && dram_desc->dram_base_addr_v) 1601 bus_dma_mem_free(&dram_desc->dram_mem); 1602 1603 if (dram_desc) 1604 explicit_bzero(dram_desc, sizeof(*dram_desc)); 1605 } 1606 1607 static int 1608 qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle, 1609 const char *image, 1610 unsigned int size, 1611 struct icp_firml_dram_desc *img_desc, 1612 struct icp_qat_fw_auth_desc **desc) 1613 { 1614 const struct icp_qat_css_hdr *css_hdr = 1615 (const struct icp_qat_css_hdr *)image; 1616 struct icp_qat_fw_auth_desc *auth_desc; 1617 struct icp_qat_auth_chunk *auth_chunk; 1618 u64 virt_addr, bus_addr, virt_base; 1619 unsigned int length, simg_offset = sizeof(*auth_chunk); 1620 unsigned int device_id = pci_get_device(GET_DEV(handle->accel_dev)); 1621 1622 if (size <= ICP_QAT_AE_IMG_OFFSET(device_id)) { 1623 pr_err("QAT: error, input image size too small %d\n", size); 1624 return EINVAL; 1625 } 1626 1627 if (size > 1628 (ICP_QAT_AE_IMG_OFFSET(device_id) + ICP_QAT_CSS_MAX_IMAGE_LEN)) { 1629 pr_err("QAT: error, input image size overflow %d\n", size); 1630 return EINVAL; 1631 } 1632 1633 length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ? 1634 ICP_QAT_CSS_AE_SIMG_LEN(device_id) + simg_offset : 1635 size + ICP_QAT_CSS_FWSK_PAD_LEN(device_id) + simg_offset; 1636 if (qat_uclo_simg_alloc(handle, img_desc, length)) { 1637 pr_err("QAT: error, allocate continuous dram fail\n"); 1638 return -ENOMEM; 1639 } 1640 1641 auth_chunk = img_desc->dram_base_addr_v; 1642 auth_chunk->chunk_size = img_desc->dram_size; 1643 auth_chunk->chunk_bus_addr = img_desc->dram_bus_addr; 1644 virt_base = (uintptr_t)img_desc->dram_base_addr_v + simg_offset; 1645 bus_addr = img_desc->dram_bus_addr + simg_offset; 1646 auth_desc = img_desc->dram_base_addr_v; 1647 auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); 1648 auth_desc->css_hdr_low = (unsigned int)bus_addr; 1649 virt_addr = virt_base; 1650 1651 memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr)); 1652 /* pub key */ 1653 bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) + 1654 sizeof(*css_hdr); 1655 virt_addr = virt_addr + sizeof(*css_hdr); 1656 1657 auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); 1658 auth_desc->fwsk_pub_low = (unsigned int)bus_addr; 1659 1660 memcpy((void *)(uintptr_t)virt_addr, 1661 (const void *)(image + sizeof(*css_hdr)), 1662 ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id)); 1663 /* padding */ 1664 explicit_bzero((void *)(uintptr_t)( 1665 virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id)), 1666 ICP_QAT_CSS_FWSK_PAD_LEN(device_id)); 1667 1668 /* exponent */ 1669 memcpy((void *)(uintptr_t)(virt_addr + 1670 ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id) + 1671 ICP_QAT_CSS_FWSK_PAD_LEN(device_id)), 1672 (const void *)(image + sizeof(*css_hdr) + 1673 ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id)), 1674 sizeof(unsigned int)); 1675 1676 /* signature */ 1677 bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high, auth_desc->fwsk_pub_low) + 1678 ICP_QAT_CSS_FWSK_PUB_LEN(device_id); 1679 virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN(device_id); 1680 auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); 1681 auth_desc->signature_low = (unsigned int)bus_addr; 1682 1683 memcpy((void *)(uintptr_t)virt_addr, 1684 (const void *)(image + sizeof(*css_hdr) + 1685 ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id) + 1686 ICP_QAT_CSS_FWSK_EXPONENT_LEN(device_id)), 1687 ICP_QAT_CSS_SIGNATURE_LEN(device_id)); 1688 1689 bus_addr = 1690 ADD_ADDR(auth_desc->signature_high, auth_desc->signature_low) + 1691 ICP_QAT_CSS_SIGNATURE_LEN(device_id); 1692 virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(device_id); 1693 1694 auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); 1695 auth_desc->img_low = (unsigned int)bus_addr; 1696 auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET(device_id); 1697 memcpy((void *)(uintptr_t)virt_addr, 1698 (const void *)(image + ICP_QAT_AE_IMG_OFFSET(device_id)), 1699 auth_desc->img_len); 1700 virt_addr = virt_base; 1701 /* AE firmware */ 1702 if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type == 1703 CSS_AE_FIRMWARE) { 1704 auth_desc->img_ae_mode_data_high = auth_desc->img_high; 1705 auth_desc->img_ae_mode_data_low = auth_desc->img_low; 1706 bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high, 1707 auth_desc->img_ae_mode_data_low) + 1708 sizeof(struct icp_qat_simg_ae_mode); 1709 1710 auth_desc->img_ae_init_data_high = 1711 (unsigned int)(bus_addr >> BITS_IN_DWORD); 1712 auth_desc->img_ae_init_data_low = (unsigned int)bus_addr; 1713 bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN; 1714 auth_desc->img_ae_insts_high = 1715 (unsigned int)(bus_addr >> BITS_IN_DWORD); 1716 auth_desc->img_ae_insts_low = (unsigned int)bus_addr; 1717 virt_addr += sizeof(struct icp_qat_css_hdr) + 1718 ICP_QAT_CSS_FWSK_PUB_LEN(device_id) + 1719 ICP_QAT_CSS_SIGNATURE_LEN(device_id); 1720 auth_desc->ae_mask = 1721 ((struct icp_qat_simg_ae_mode *)virt_addr)->ae_mask & 1722 handle->cfg_ae_mask; 1723 } else { 1724 auth_desc->img_ae_insts_high = auth_desc->img_high; 1725 auth_desc->img_ae_insts_low = auth_desc->img_low; 1726 } 1727 *desc = auth_desc; 1728 return 0; 1729 } 1730 1731 static int 1732 qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle, 1733 struct icp_qat_fw_auth_desc *desc) 1734 { 1735 unsigned int i = 0; 1736 unsigned int fcu_sts; 1737 unsigned int fcu_sts_csr, fcu_ctl_csr; 1738 unsigned int loaded_aes = FCU_LOADED_AE_POS; 1739 unsigned long ae_mask = handle->hal_handle->ae_mask; 1740 1741 if (IS_QAT_GEN3_OR_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { 1742 fcu_ctl_csr = FCU_CONTROL_C4XXX; 1743 fcu_sts_csr = FCU_STATUS_C4XXX; 1744 1745 } else { 1746 fcu_ctl_csr = FCU_CONTROL; 1747 fcu_sts_csr = FCU_STATUS; 1748 } 1749 1750 for_each_set_bit(i, &ae_mask, handle->hal_handle->ae_max_num) 1751 { 1752 int retry = 0; 1753 1754 if (!((desc->ae_mask >> i) & 0x1)) 1755 continue; 1756 if (qat_hal_check_ae_active(handle, i)) { 1757 pr_err("QAT: AE %d is active\n", i); 1758 return EINVAL; 1759 } 1760 SET_FCU_CSR(handle, 1761 fcu_ctl_csr, 1762 (FCU_CTRL_CMD_LOAD | 1763 (IS_QAT_GEN4( 1764 pci_get_device(GET_DEV(handle->accel_dev))) ? 1765 (1 << FCU_CTRL_BROADCAST_POS) : 1766 0) | 1767 (i << FCU_CTRL_AE_POS))); 1768 1769 do { 1770 pause_ms("adfstop", FW_AUTH_WAIT_PERIOD); 1771 fcu_sts = GET_FCU_CSR(handle, fcu_sts_csr); 1772 if ((fcu_sts & FCU_AUTH_STS_MASK) == 1773 FCU_STS_LOAD_DONE) { 1774 loaded_aes = IS_QAT_GEN3_OR_GEN4(pci_get_device( 1775 GET_DEV(handle->accel_dev))) ? 1776 GET_FCU_CSR(handle, FCU_AE_LOADED_C4XXX) : 1777 (fcu_sts >> FCU_LOADED_AE_POS); 1778 if (loaded_aes & (1 << i)) 1779 break; 1780 } 1781 } while (retry++ < FW_AUTH_MAX_RETRY); 1782 if (retry > FW_AUTH_MAX_RETRY) { 1783 pr_err("QAT: firmware load failed timeout %x\n", retry); 1784 return EINVAL; 1785 } 1786 } 1787 return 0; 1788 } 1789 1790 static int 1791 qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle, 1792 const void *addr_ptr, 1793 int mem_size) 1794 { 1795 struct icp_qat_suof_handle *suof_handle; 1796 1797 suof_handle = malloc(sizeof(*suof_handle), M_QAT, M_WAITOK | M_ZERO); 1798 handle->sobj_handle = suof_handle; 1799 if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) { 1800 qat_uclo_del_suof(handle); 1801 pr_err("QAT: map SUOF failed\n"); 1802 return EINVAL; 1803 } 1804 return 0; 1805 } 1806 1807 int 1808 qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, 1809 const void *addr_ptr, 1810 int mem_size) 1811 { 1812 struct icp_qat_fw_auth_desc *desc = NULL; 1813 struct icp_firml_dram_desc img_desc; 1814 int status = 0; 1815 1816 if (handle->fw_auth) { 1817 status = qat_uclo_map_auth_fw( 1818 handle, addr_ptr, mem_size, &img_desc, &desc); 1819 if (!status) 1820 status = qat_uclo_auth_fw(handle, desc); 1821 1822 qat_uclo_simg_free(handle, &img_desc); 1823 } else { 1824 if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { 1825 device_printf( 1826 NULL, "QAT: PKE service is not allowed because "); 1827 device_printf(NULL, "MMP fw will not be loaded for "); 1828 device_printf(NULL, 1829 "device 0x%x", 1830 pci_get_device( 1831 GET_DEV(handle->accel_dev))); 1832 return status; 1833 } 1834 status = qat_uclo_wr_sram_by_words(handle, 1835 handle->hal_sram_offset, 1836 addr_ptr, 1837 mem_size); 1838 } 1839 return status; 1840 } 1841 1842 static int 1843 qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, 1844 const void *addr_ptr, 1845 int mem_size) 1846 { 1847 struct icp_qat_uof_filehdr *filehdr; 1848 struct icp_qat_uclo_objhandle *objhdl; 1849 1850 objhdl = malloc(sizeof(*objhdl), M_QAT, M_WAITOK | M_ZERO); 1851 objhdl->obj_buf = malloc(mem_size, M_QAT, M_WAITOK); 1852 bcopy(addr_ptr, objhdl->obj_buf, mem_size); 1853 filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf; 1854 if (qat_uclo_check_uof_format(filehdr)) 1855 goto out_objhdr_err; 1856 objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, 1857 filehdr, 1858 ICP_QAT_UOF_OBJS); 1859 if (!objhdl->obj_hdr) { 1860 pr_err("QAT: object file chunk is null\n"); 1861 goto out_objhdr_err; 1862 } 1863 handle->obj_handle = objhdl; 1864 if (qat_uclo_parse_uof_obj(handle)) 1865 goto out_overlay_obj_err; 1866 return 0; 1867 1868 out_overlay_obj_err: 1869 handle->obj_handle = NULL; 1870 free(objhdl->obj_hdr, M_QAT); 1871 out_objhdr_err: 1872 free(objhdl->obj_buf, M_QAT); 1873 free(objhdl, M_QAT); 1874 return ENOMEM; 1875 } 1876 1877 static int 1878 qat_uclo_map_mof_file_hdr(struct icp_qat_fw_loader_handle *handle, 1879 const struct icp_qat_mof_file_hdr *mof_ptr, 1880 u32 mof_size) 1881 { 1882 unsigned int checksum = 0; 1883 unsigned int min_ver_offset = 0; 1884 struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle; 1885 1886 mobj_handle->file_id = ICP_QAT_MOF_FID; 1887 mobj_handle->mof_buf = (const char *)mof_ptr; 1888 mobj_handle->mof_size = mof_size; 1889 1890 min_ver_offset = 1891 mof_size - offsetof(struct icp_qat_mof_file_hdr, min_ver); 1892 checksum = qat_uclo_calc_str_checksum((const char *)&mof_ptr->min_ver, 1893 min_ver_offset); 1894 if (checksum != mof_ptr->checksum) { 1895 pr_err("QAT: incorrect MOF checksum\n"); 1896 return EINVAL; 1897 } 1898 mobj_handle->checksum = mof_ptr->checksum; 1899 mobj_handle->min_ver = mof_ptr->min_ver; 1900 mobj_handle->maj_ver = mof_ptr->maj_ver; 1901 return 0; 1902 } 1903 1904 void 1905 qat_uclo_del_mof(struct icp_qat_fw_loader_handle *handle) 1906 { 1907 struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle; 1908 1909 free(mobj_handle->obj_table.obj_hdr, M_QAT); 1910 mobj_handle->obj_table.obj_hdr = NULL; 1911 free(handle->mobj_handle, M_QAT); 1912 handle->mobj_handle = NULL; 1913 } 1914 1915 static int 1916 qat_uclo_seek_obj_inside_mof(struct icp_qat_mof_handle *mobj_handle, 1917 const char *obj_name, 1918 const char **obj_ptr, 1919 unsigned int *obj_size) 1920 { 1921 unsigned int i; 1922 struct icp_qat_mof_objhdr *obj_hdr = mobj_handle->obj_table.obj_hdr; 1923 1924 for (i = 0; i < mobj_handle->obj_table.num_objs; i++) { 1925 if (!strncmp(obj_hdr[i].obj_name, 1926 obj_name, 1927 ICP_QAT_SUOF_OBJ_NAME_LEN)) { 1928 *obj_ptr = obj_hdr[i].obj_buf; 1929 *obj_size = obj_hdr[i].obj_size; 1930 break; 1931 } 1932 } 1933 1934 if (i >= mobj_handle->obj_table.num_objs) { 1935 pr_err("QAT: object %s is not found inside MOF\n", obj_name); 1936 return EFAULT; 1937 } 1938 return 0; 1939 } 1940 1941 static int 1942 qat_uclo_map_obj_from_mof(struct icp_qat_mof_handle *mobj_handle, 1943 struct icp_qat_mof_objhdr *mobj_hdr, 1944 struct icp_qat_mof_obj_chunkhdr *obj_chunkhdr) 1945 { 1946 if ((strncmp((char *)obj_chunkhdr->chunk_id, 1947 ICP_QAT_UOF_IMAG, 1948 ICP_QAT_MOF_OBJ_CHUNKID_LEN)) == 0) { 1949 mobj_hdr->obj_buf = 1950 (const char *)((unsigned long)obj_chunkhdr->offset + 1951 mobj_handle->uobjs_hdr); 1952 } else if ((strncmp((char *)(obj_chunkhdr->chunk_id), 1953 ICP_QAT_SUOF_IMAG, 1954 ICP_QAT_MOF_OBJ_CHUNKID_LEN)) == 0) { 1955 mobj_hdr->obj_buf = 1956 (const char *)((unsigned long)obj_chunkhdr->offset + 1957 mobj_handle->sobjs_hdr); 1958 1959 } else { 1960 pr_err("QAT: unsupported chunk id\n"); 1961 return EINVAL; 1962 } 1963 mobj_hdr->obj_size = (unsigned int)obj_chunkhdr->size; 1964 mobj_hdr->obj_name = 1965 (char *)(obj_chunkhdr->name + mobj_handle->sym_str); 1966 return 0; 1967 } 1968 1969 static int 1970 qat_uclo_map_objs_from_mof(struct icp_qat_mof_handle *mobj_handle) 1971 { 1972 struct icp_qat_mof_objhdr *mof_obj_hdr; 1973 const struct icp_qat_mof_obj_hdr *uobj_hdr; 1974 const struct icp_qat_mof_obj_hdr *sobj_hdr; 1975 struct icp_qat_mof_obj_chunkhdr *uobj_chunkhdr; 1976 struct icp_qat_mof_obj_chunkhdr *sobj_chunkhdr; 1977 unsigned int uobj_chunk_num = 0, sobj_chunk_num = 0; 1978 unsigned int *valid_chunks = 0; 1979 int ret, i; 1980 1981 uobj_hdr = (const struct icp_qat_mof_obj_hdr *)mobj_handle->uobjs_hdr; 1982 sobj_hdr = (const struct icp_qat_mof_obj_hdr *)mobj_handle->sobjs_hdr; 1983 if (uobj_hdr) 1984 uobj_chunk_num = uobj_hdr->num_chunks; 1985 if (sobj_hdr) 1986 sobj_chunk_num = sobj_hdr->num_chunks; 1987 1988 mof_obj_hdr = (struct icp_qat_mof_objhdr *) 1989 malloc((uobj_chunk_num + sobj_chunk_num) * sizeof(*mof_obj_hdr), 1990 M_QAT, 1991 M_WAITOK | M_ZERO); 1992 1993 mobj_handle->obj_table.obj_hdr = mof_obj_hdr; 1994 valid_chunks = &mobj_handle->obj_table.num_objs; 1995 uobj_chunkhdr = 1996 (struct icp_qat_mof_obj_chunkhdr *)((uintptr_t)uobj_hdr + 1997 sizeof(*uobj_hdr)); 1998 sobj_chunkhdr = 1999 (struct icp_qat_mof_obj_chunkhdr *)((uintptr_t)sobj_hdr + 2000 sizeof(*sobj_hdr)); 2001 2002 /* map uof objects */ 2003 for (i = 0; i < uobj_chunk_num; i++) { 2004 ret = qat_uclo_map_obj_from_mof(mobj_handle, 2005 &mof_obj_hdr[*valid_chunks], 2006 &uobj_chunkhdr[i]); 2007 if (ret) 2008 return ret; 2009 (*valid_chunks)++; 2010 } 2011 2012 /* map suof objects */ 2013 for (i = 0; i < sobj_chunk_num; i++) { 2014 ret = qat_uclo_map_obj_from_mof(mobj_handle, 2015 &mof_obj_hdr[*valid_chunks], 2016 &sobj_chunkhdr[i]); 2017 if (ret) 2018 return ret; 2019 (*valid_chunks)++; 2020 } 2021 2022 if ((uobj_chunk_num + sobj_chunk_num) != *valid_chunks) { 2023 pr_err("QAT: inconsistent UOF/SUOF chunk amount\n"); 2024 return EINVAL; 2025 } 2026 return 0; 2027 } 2028 2029 static void 2030 qat_uclo_map_mof_symobjs(struct icp_qat_mof_handle *mobj_handle, 2031 struct icp_qat_mof_chunkhdr *mof_chunkhdr) 2032 { 2033 char **sym_str = (char **)&mobj_handle->sym_str; 2034 unsigned int *sym_size = &mobj_handle->sym_size; 2035 struct icp_qat_mof_str_table *str_table_obj; 2036 2037 *sym_size = *(unsigned int *)(uintptr_t)(mof_chunkhdr->offset + 2038 mobj_handle->mof_buf); 2039 *sym_str = 2040 (char *)(uintptr_t)(mobj_handle->mof_buf + mof_chunkhdr->offset + 2041 sizeof(str_table_obj->tab_len)); 2042 } 2043 2044 static void 2045 qat_uclo_map_mof_chunk(struct icp_qat_mof_handle *mobj_handle, 2046 struct icp_qat_mof_chunkhdr *mof_chunkhdr) 2047 { 2048 if (!strncmp(mof_chunkhdr->chunk_id, 2049 ICP_QAT_MOF_SYM_OBJS, 2050 ICP_QAT_MOF_OBJ_ID_LEN)) 2051 qat_uclo_map_mof_symobjs(mobj_handle, mof_chunkhdr); 2052 else if (!strncmp(mof_chunkhdr->chunk_id, 2053 ICP_QAT_UOF_OBJS, 2054 ICP_QAT_MOF_OBJ_ID_LEN)) 2055 mobj_handle->uobjs_hdr = 2056 mobj_handle->mof_buf + (unsigned long)mof_chunkhdr->offset; 2057 else if (!strncmp(mof_chunkhdr->chunk_id, 2058 ICP_QAT_SUOF_OBJS, 2059 ICP_QAT_MOF_OBJ_ID_LEN)) 2060 mobj_handle->sobjs_hdr = 2061 mobj_handle->mof_buf + (unsigned long)mof_chunkhdr->offset; 2062 } 2063 2064 static int 2065 qat_uclo_check_mof_format(const struct icp_qat_mof_file_hdr *mof_hdr) 2066 { 2067 int maj = mof_hdr->maj_ver & 0xff; 2068 int min = mof_hdr->min_ver & 0xff; 2069 2070 if (mof_hdr->file_id != ICP_QAT_MOF_FID) { 2071 pr_err("QAT: invalid header 0x%x\n", mof_hdr->file_id); 2072 return EINVAL; 2073 } 2074 2075 if (mof_hdr->num_chunks <= 0x1) { 2076 pr_err("QAT: MOF chunk amount is incorrect\n"); 2077 return EINVAL; 2078 } 2079 if (maj != ICP_QAT_MOF_MAJVER || min != ICP_QAT_MOF_MINVER) { 2080 pr_err("QAT: bad MOF version, major 0x%x, minor 0x%x\n", 2081 maj, 2082 min); 2083 return EINVAL; 2084 } 2085 return 0; 2086 } 2087 2088 static int 2089 qat_uclo_map_mof_obj(struct icp_qat_fw_loader_handle *handle, 2090 const struct icp_qat_mof_file_hdr *mof_ptr, 2091 u32 mof_size, 2092 const char *obj_name, 2093 const char **obj_ptr, 2094 unsigned int *obj_size) 2095 { 2096 struct icp_qat_mof_handle *mobj_handle; 2097 struct icp_qat_mof_chunkhdr *mof_chunkhdr; 2098 unsigned short chunks_num; 2099 int ret; 2100 unsigned int i; 2101 2102 if (mof_ptr->file_id == ICP_QAT_UOF_FID || 2103 mof_ptr->file_id == ICP_QAT_SUOF_FID) { 2104 if (obj_ptr) 2105 *obj_ptr = (const char *)mof_ptr; 2106 if (obj_size) 2107 *obj_size = (unsigned int)mof_size; 2108 return 0; 2109 } 2110 if (qat_uclo_check_mof_format(mof_ptr)) 2111 return EINVAL; 2112 mobj_handle = malloc(sizeof(*mobj_handle), M_QAT, M_WAITOK | M_ZERO); 2113 handle->mobj_handle = mobj_handle; 2114 ret = qat_uclo_map_mof_file_hdr(handle, mof_ptr, mof_size); 2115 if (ret) 2116 return ret; 2117 mof_chunkhdr = (struct icp_qat_mof_chunkhdr *)((uintptr_t)mof_ptr + 2118 sizeof(*mof_ptr)); 2119 chunks_num = mof_ptr->num_chunks; 2120 /*Parse MOF file chunks*/ 2121 for (i = 0; i < chunks_num; i++) 2122 qat_uclo_map_mof_chunk(mobj_handle, &mof_chunkhdr[i]); 2123 /*All sym_objs uobjs and sobjs should be available*/ 2124 if (!mobj_handle->sym_str || 2125 (!mobj_handle->uobjs_hdr && !mobj_handle->sobjs_hdr)) 2126 return EINVAL; 2127 ret = qat_uclo_map_objs_from_mof(mobj_handle); 2128 if (ret) 2129 return ret; 2130 /*Seek specified uof object in MOF*/ 2131 ret = qat_uclo_seek_obj_inside_mof(mobj_handle, 2132 obj_name, 2133 obj_ptr, 2134 obj_size); 2135 if (ret) 2136 return ret; 2137 return 0; 2138 } 2139 2140 int 2141 qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle, 2142 const void *addr_ptr, 2143 u32 mem_size, 2144 const char *obj_name) 2145 { 2146 const char *obj_addr; 2147 u32 obj_size; 2148 int ret; 2149 2150 BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE > 2151 (sizeof(handle->hal_handle->ae_mask) * 8)); 2152 2153 if (!handle || !addr_ptr || mem_size < 24) 2154 return EINVAL; 2155 2156 if (obj_name) { 2157 ret = qat_uclo_map_mof_obj( 2158 handle, addr_ptr, mem_size, obj_name, &obj_addr, &obj_size); 2159 if (ret) 2160 return ret; 2161 } else { 2162 obj_addr = addr_ptr; 2163 obj_size = mem_size; 2164 } 2165 2166 return (handle->fw_auth) ? 2167 qat_uclo_map_suof_obj(handle, obj_addr, obj_size) : 2168 qat_uclo_map_uof_obj(handle, obj_addr, obj_size); 2169 } 2170 2171 void 2172 qat_uclo_del_obj(struct icp_qat_fw_loader_handle *handle) 2173 { 2174 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 2175 unsigned int a; 2176 unsigned long ae_mask = handle->hal_handle->ae_mask; 2177 2178 if (handle->mobj_handle) 2179 qat_uclo_del_mof(handle); 2180 if (handle->sobj_handle) 2181 qat_uclo_del_suof(handle); 2182 if (!obj_handle) 2183 return; 2184 2185 free(obj_handle->uword_buf, M_QAT); 2186 for (a = 0; a < obj_handle->uimage_num; a++) 2187 free(obj_handle->ae_uimage[a].page, M_QAT); 2188 2189 for_each_set_bit(a, &ae_mask, handle->hal_handle->ae_max_num) 2190 { 2191 qat_uclo_free_ae_data(&obj_handle->ae_data[a]); 2192 } 2193 2194 free(obj_handle->obj_hdr, M_QAT); 2195 free(obj_handle->obj_buf, M_QAT); 2196 free(obj_handle, M_QAT); 2197 handle->obj_handle = NULL; 2198 } 2199 2200 static void 2201 qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle, 2202 struct icp_qat_uclo_encap_page *encap_page, 2203 uint64_t *uword, 2204 unsigned int addr_p, 2205 unsigned int raddr, 2206 uint64_t fill) 2207 { 2208 uint64_t uwrd = 0; 2209 unsigned int i, addr; 2210 2211 if (!encap_page) { 2212 *uword = fill; 2213 return; 2214 } 2215 addr = (encap_page->page_region) ? raddr : addr_p; 2216 for (i = 0; i < encap_page->uwblock_num; i++) { 2217 if (addr >= encap_page->uwblock[i].start_addr && 2218 addr <= encap_page->uwblock[i].start_addr + 2219 encap_page->uwblock[i].words_num - 1) { 2220 addr -= encap_page->uwblock[i].start_addr; 2221 addr *= obj_handle->uword_in_bytes; 2222 memcpy(&uwrd, 2223 (void *)(((uintptr_t)encap_page->uwblock[i] 2224 .micro_words) + 2225 addr), 2226 obj_handle->uword_in_bytes); 2227 uwrd = uwrd & 0xbffffffffffull; 2228 } 2229 } 2230 *uword = uwrd; 2231 if (*uword == INVLD_UWORD) 2232 *uword = fill; 2233 } 2234 2235 static void 2236 qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle, 2237 struct icp_qat_uclo_encap_page *encap_page, 2238 unsigned int ae) 2239 { 2240 unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen; 2241 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 2242 uint64_t fill_pat; 2243 2244 /* load the page starting at appropriate ustore address */ 2245 /* get fill-pattern from an image -- they are all the same */ 2246 memcpy(&fill_pat, 2247 obj_handle->ae_uimage[0].img_ptr->fill_pattern, 2248 sizeof(uint64_t)); 2249 uw_physical_addr = encap_page->beg_addr_p; 2250 uw_relative_addr = 0; 2251 words_num = encap_page->micro_words_num; 2252 while (words_num) { 2253 if (words_num < UWORD_CPYBUF_SIZE) 2254 cpylen = words_num; 2255 else 2256 cpylen = UWORD_CPYBUF_SIZE; 2257 2258 /* load the buffer */ 2259 for (i = 0; i < cpylen; i++) 2260 qat_uclo_fill_uwords(obj_handle, 2261 encap_page, 2262 &obj_handle->uword_buf[i], 2263 uw_physical_addr + i, 2264 uw_relative_addr + i, 2265 fill_pat); 2266 2267 if (obj_handle->ae_data[ae].shareable_ustore && 2268 !IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) 2269 /* copy the buffer to ustore */ 2270 qat_hal_wr_coalesce_uwords(handle, 2271 (unsigned char)ae, 2272 uw_physical_addr, 2273 cpylen, 2274 obj_handle->uword_buf); 2275 else 2276 /* copy the buffer to ustore */ 2277 qat_hal_wr_uwords(handle, 2278 (unsigned char)ae, 2279 uw_physical_addr, 2280 cpylen, 2281 obj_handle->uword_buf); 2282 uw_physical_addr += cpylen; 2283 uw_relative_addr += cpylen; 2284 words_num -= cpylen; 2285 } 2286 } 2287 2288 static void 2289 qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle, 2290 struct icp_qat_uof_image *image) 2291 { 2292 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 2293 unsigned int ctx_mask, s; 2294 struct icp_qat_uclo_page *page; 2295 unsigned char ae = 0; 2296 int ctx; 2297 struct icp_qat_uclo_aedata *aed; 2298 unsigned long ae_mask = handle->hal_handle->ae_mask; 2299 2300 if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX) 2301 ctx_mask = 0xff; 2302 else 2303 ctx_mask = 0x55; 2304 /* load the default page and set assigned CTX PC 2305 * to the entrypoint address 2306 */ 2307 for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) 2308 { 2309 unsigned long cfg_ae_mask = handle->cfg_ae_mask; 2310 unsigned long ae_assigned = image->ae_assigned; 2311 2312 if (!test_bit(ae, &cfg_ae_mask)) 2313 continue; 2314 2315 if (!test_bit(ae, &ae_assigned)) 2316 continue; 2317 2318 aed = &obj_handle->ae_data[ae]; 2319 /* find the slice to which this image is assigned */ 2320 for (s = 0; s < aed->slice_num; s++) { 2321 if (image->ctx_assigned & 2322 aed->ae_slices[s].ctx_mask_assigned) 2323 break; 2324 } 2325 if (s >= aed->slice_num) 2326 continue; 2327 page = aed->ae_slices[s].page; 2328 if (!page->encap_page->def_page) 2329 continue; 2330 qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae); 2331 2332 page = aed->ae_slices[s].page; 2333 for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) 2334 aed->ae_slices[s].cur_page[ctx] = 2335 (ctx_mask & (1 << ctx)) ? page : NULL; 2336 qat_hal_set_live_ctx(handle, 2337 (unsigned char)ae, 2338 image->ctx_assigned); 2339 qat_hal_set_pc(handle, 2340 (unsigned char)ae, 2341 image->ctx_assigned, 2342 image->entry_address); 2343 } 2344 } 2345 2346 static int 2347 qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle) 2348 { 2349 unsigned int i; 2350 struct icp_qat_fw_auth_desc *desc = NULL; 2351 struct icp_firml_dram_desc img_desc; 2352 struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle; 2353 struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr; 2354 2355 for (i = 0; i < sobj_handle->img_table.num_simgs; i++) { 2356 if (qat_uclo_map_auth_fw(handle, 2357 (const char *)simg_hdr[i].simg_buf, 2358 (unsigned int)(simg_hdr[i].simg_len), 2359 &img_desc, 2360 &desc)) 2361 goto wr_err; 2362 if (qat_uclo_auth_fw(handle, desc)) 2363 goto wr_err; 2364 if (qat_uclo_is_broadcast(handle, i)) { 2365 if (qat_uclo_broadcast_load_fw(handle, desc)) 2366 goto wr_err; 2367 } else { 2368 if (qat_uclo_load_fw(handle, desc)) 2369 goto wr_err; 2370 } 2371 qat_uclo_simg_free(handle, &img_desc); 2372 } 2373 2374 return 0; 2375 wr_err: 2376 qat_uclo_simg_free(handle, &img_desc); 2377 return -EINVAL; 2378 } 2379 2380 static int 2381 qat_uclo_wr_uof_img(struct icp_qat_fw_loader_handle *handle) 2382 { 2383 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 2384 unsigned int i; 2385 2386 if (qat_uclo_init_globals(handle)) 2387 return EINVAL; 2388 for (i = 0; i < obj_handle->uimage_num; i++) { 2389 if (!obj_handle->ae_uimage[i].img_ptr) 2390 return EINVAL; 2391 if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i])) 2392 return EINVAL; 2393 qat_uclo_wr_uimage_page(handle, 2394 obj_handle->ae_uimage[i].img_ptr); 2395 } 2396 return 0; 2397 } 2398 2399 int 2400 qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle) 2401 { 2402 return (handle->fw_auth) ? qat_uclo_wr_suof_img(handle) : 2403 qat_uclo_wr_uof_img(handle); 2404 } 2405 2406 int 2407 qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle, 2408 unsigned int cfg_ae_mask) 2409 { 2410 if (!cfg_ae_mask) 2411 return EINVAL; 2412 2413 handle->cfg_ae_mask = cfg_ae_mask; 2414 return 0; 2415 } 2416