1 /* SPDX-License-Identifier: BSD-3-Clause */ 2 /* Copyright(c) 2007-2025 Intel Corporation */ 3 #include "qat_freebsd.h" 4 #include "adf_cfg.h" 5 #include "adf_common_drv.h" 6 #include "adf_accel_devices.h" 7 #include "icp_qat_uclo.h" 8 #include "icp_qat_fw.h" 9 #include "icp_qat_fw_init_admin.h" 10 #include "adf_cfg_strings.h" 11 #include "adf_transport_access_macros.h" 12 #include "adf_transport_internal.h" 13 #include <sys/ctype.h> 14 #include <sys/kernel.h> 15 #include <linux/delay.h> 16 #include "adf_accel_devices.h" 17 #include "adf_common_drv.h" 18 #include "icp_qat_uclo.h" 19 #include "icp_qat_hal.h" 20 #include "icp_qat_fw_loader_handle.h" 21 22 #define UWORD_CPYBUF_SIZE 1024 23 #define INVLD_UWORD 0xffffffffffull 24 #define PID_MINOR_REV 0xf 25 #define PID_MAJOR_REV (0xf << 4) 26 #define MAX_UINT32_VAL 0xfffffffful 27 28 static int 29 qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle, 30 unsigned int ae, 31 unsigned int image_num) 32 { 33 struct icp_qat_uclo_aedata *ae_data; 34 struct icp_qat_uclo_encapme *encap_image; 35 struct icp_qat_uclo_page *page = NULL; 36 struct icp_qat_uclo_aeslice *ae_slice = NULL; 37 38 ae_data = &obj_handle->ae_data[ae]; 39 encap_image = &obj_handle->ae_uimage[image_num]; 40 ae_slice = &ae_data->ae_slices[ae_data->slice_num]; 41 ae_slice->encap_image = encap_image; 42 43 if (encap_image->img_ptr) { 44 ae_slice->ctx_mask_assigned = 45 encap_image->img_ptr->ctx_assigned; 46 ae_data->shareable_ustore = 47 ICP_QAT_SHARED_USTORE_MODE(encap_image->img_ptr->ae_mode); 48 if (obj_handle->prod_type == ICP_QAT_AC_4XXX_A_DEV_TYPE) 49 ae_data->eff_ustore_size = obj_handle->ustore_phy_size; 50 else { 51 ae_data->eff_ustore_size = ae_data->shareable_ustore ? 52 (obj_handle->ustore_phy_size << 1) : 53 obj_handle->ustore_phy_size; 54 } 55 } else { 56 ae_slice->ctx_mask_assigned = 0; 57 } 58 ae_slice->region = 59 malloc(sizeof(*ae_slice->region), M_QAT, M_WAITOK | M_ZERO); 60 ae_slice->page = 61 malloc(sizeof(*ae_slice->page), M_QAT, M_WAITOK | M_ZERO); 62 page = ae_slice->page; 63 page->encap_page = encap_image->page; 64 ae_slice->page->region = ae_slice->region; 65 ae_data->slice_num++; 66 return 0; 67 } 68 69 static int 70 qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data) 71 { 72 unsigned int i; 73 74 if (!ae_data) { 75 pr_err("QAT: bad argument, ae_data is NULL\n "); 76 return EINVAL; 77 } 78 79 for (i = 0; i < ae_data->slice_num; i++) { 80 free(ae_data->ae_slices[i].region, M_QAT); 81 ae_data->ae_slices[i].region = NULL; 82 free(ae_data->ae_slices[i].page, M_QAT); 83 ae_data->ae_slices[i].page = NULL; 84 } 85 return 0; 86 } 87 88 static char * 89 qat_uclo_get_string(struct icp_qat_uof_strtable *str_table, 90 unsigned int str_offset) 91 { 92 if (!str_table->table_len || str_offset > str_table->table_len) 93 return NULL; 94 return (char *)(((uintptr_t)(str_table->strings)) + str_offset); 95 } 96 97 static int 98 qat_uclo_check_uof_format(struct icp_qat_uof_filehdr *hdr) 99 { 100 int maj = hdr->maj_ver & 0xff; 101 int min = hdr->min_ver & 0xff; 102 103 if (hdr->file_id != ICP_QAT_UOF_FID) { 104 pr_err("QAT: Invalid header 0x%x\n", hdr->file_id); 105 return EINVAL; 106 } 107 if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) { 108 pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n", 109 maj, 110 min); 111 return EINVAL; 112 } 113 return 0; 114 } 115 116 static int 117 qat_uclo_check_suof_format(const struct icp_qat_suof_filehdr *suof_hdr) 118 { 119 int maj = suof_hdr->maj_ver & 0xff; 120 int min = suof_hdr->min_ver & 0xff; 121 122 if (suof_hdr->file_id != ICP_QAT_SUOF_FID) { 123 pr_err("QAT: invalid header 0x%x\n", suof_hdr->file_id); 124 return EINVAL; 125 } 126 if (suof_hdr->fw_type != 0) { 127 pr_err("QAT: unsupported firmware type\n"); 128 return EINVAL; 129 } 130 if (suof_hdr->num_chunks <= 0x1) { 131 pr_err("QAT: SUOF chunk amount is incorrect\n"); 132 return EINVAL; 133 } 134 if (maj != ICP_QAT_SUOF_MAJVER || min != ICP_QAT_SUOF_MINVER) { 135 pr_err("QAT: bad SUOF version, major 0x%x, minor 0x%x\n", 136 maj, 137 min); 138 return EINVAL; 139 } 140 return 0; 141 } 142 143 static int 144 qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle, 145 unsigned int addr, 146 const unsigned int *val, 147 unsigned int num_in_bytes) 148 { 149 unsigned int outval; 150 const unsigned char *ptr = (const unsigned char *)val; 151 152 if (num_in_bytes > handle->hal_sram_size) { 153 pr_err("QAT: error, mmp size overflow %d\n", num_in_bytes); 154 return EINVAL; 155 } 156 while (num_in_bytes) { 157 memcpy(&outval, ptr, 4); 158 SRAM_WRITE(handle, addr, outval); 159 num_in_bytes -= 4; 160 ptr += 4; 161 addr += 4; 162 } 163 return 0; 164 } 165 166 static void 167 qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle, 168 unsigned char ae, 169 unsigned int addr, 170 unsigned int *val, 171 unsigned int num_in_bytes) 172 { 173 unsigned int outval; 174 unsigned char *ptr = (unsigned char *)val; 175 176 addr >>= 0x2; /* convert to uword address */ 177 178 while (num_in_bytes) { 179 memcpy(&outval, ptr, 4); 180 qat_hal_wr_umem(handle, ae, addr++, 1, &outval); 181 num_in_bytes -= 4; 182 ptr += 4; 183 } 184 } 185 186 static void 187 qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle, 188 unsigned char ae, 189 struct icp_qat_uof_batch_init *umem_init_header) 190 { 191 struct icp_qat_uof_batch_init *umem_init; 192 193 if (!umem_init_header) 194 return; 195 umem_init = umem_init_header->next; 196 while (umem_init) { 197 unsigned int addr, *value, size; 198 199 ae = umem_init->ae; 200 addr = umem_init->addr; 201 value = umem_init->value; 202 size = umem_init->size; 203 qat_uclo_wr_umem_by_words(handle, ae, addr, value, size); 204 umem_init = umem_init->next; 205 } 206 } 207 208 static void 209 qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle, 210 struct icp_qat_uof_batch_init **base) 211 { 212 struct icp_qat_uof_batch_init *umem_init; 213 214 umem_init = *base; 215 while (umem_init) { 216 struct icp_qat_uof_batch_init *pre; 217 218 pre = umem_init; 219 umem_init = umem_init->next; 220 free(pre, M_QAT); 221 } 222 *base = NULL; 223 } 224 225 static int 226 qat_uclo_parse_num(char *str, unsigned int *num) 227 { 228 char buf[16] = { 0 }; 229 unsigned long ae = 0; 230 int i; 231 232 strncpy(buf, str, 15); 233 for (i = 0; i < 16; i++) { 234 if (!isdigit(buf[i])) { 235 buf[i] = '\0'; 236 break; 237 } 238 } 239 if ((compat_strtoul(buf, 10, &ae))) 240 return EFAULT; 241 242 if (ae > MAX_UINT32_VAL) 243 return EFAULT; 244 245 *num = (unsigned int)ae; 246 return 0; 247 } 248 249 static int 250 qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle, 251 struct icp_qat_uof_initmem *init_mem, 252 unsigned int size_range, 253 unsigned int *ae) 254 { 255 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 256 char *str; 257 258 if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) { 259 pr_err("QAT: initmem is out of range"); 260 return EINVAL; 261 } 262 if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) { 263 pr_err("QAT: Memory scope for init_mem error\n"); 264 return EINVAL; 265 } 266 str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name); 267 if (!str) { 268 pr_err("QAT: AE name assigned in UOF init table is NULL\n"); 269 return EINVAL; 270 } 271 if (qat_uclo_parse_num(str, ae)) { 272 pr_err("QAT: Parse num for AE number failed\n"); 273 return EINVAL; 274 } 275 if (*ae >= ICP_QAT_UCLO_MAX_AE) { 276 pr_err("QAT: ae %d out of range\n", *ae); 277 return EINVAL; 278 } 279 return 0; 280 } 281 282 static int 283 qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle *handle, 284 struct icp_qat_uof_initmem *init_mem, 285 unsigned int ae, 286 struct icp_qat_uof_batch_init **init_tab_base) 287 { 288 struct icp_qat_uof_batch_init *init_header, *tail; 289 struct icp_qat_uof_batch_init *mem_init, *tail_old; 290 struct icp_qat_uof_memvar_attr *mem_val_attr; 291 unsigned int i = 0; 292 293 mem_val_attr = 294 (struct icp_qat_uof_memvar_attr *)((uintptr_t)init_mem + 295 sizeof( 296 struct icp_qat_uof_initmem)); 297 298 init_header = *init_tab_base; 299 if (!init_header) { 300 init_header = 301 malloc(sizeof(*init_header), M_QAT, M_WAITOK | M_ZERO); 302 init_header->size = 1; 303 *init_tab_base = init_header; 304 } 305 tail_old = init_header; 306 while (tail_old->next) 307 tail_old = tail_old->next; 308 tail = tail_old; 309 for (i = 0; i < init_mem->val_attr_num; i++) { 310 mem_init = malloc(sizeof(*mem_init), M_QAT, M_WAITOK | M_ZERO); 311 mem_init->ae = ae; 312 mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte; 313 mem_init->value = &mem_val_attr->value; 314 mem_init->size = 4; 315 mem_init->next = NULL; 316 tail->next = mem_init; 317 tail = mem_init; 318 init_header->size += qat_hal_get_ins_num(); 319 mem_val_attr++; 320 } 321 return 0; 322 } 323 324 static int 325 qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle, 326 struct icp_qat_uof_initmem *init_mem) 327 { 328 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 329 unsigned int ae; 330 unsigned int lmem; 331 332 lmem = IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev))) ? 333 ICP_QAT_UCLO_MAX_LMEM_REG_2X : 334 ICP_QAT_UCLO_MAX_LMEM_REG; 335 336 if (qat_uclo_fetch_initmem_ae(handle, init_mem, lmem, &ae)) 337 return EINVAL; 338 if (qat_uclo_create_batch_init_list( 339 handle, init_mem, ae, &obj_handle->lm_init_tab[ae])) 340 return EINVAL; 341 return 0; 342 } 343 344 static int 345 qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle, 346 struct icp_qat_uof_initmem *init_mem) 347 { 348 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 349 unsigned int ae, ustore_size, uaddr, i; 350 struct icp_qat_uclo_aedata *aed; 351 352 ustore_size = obj_handle->ustore_phy_size; 353 if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae)) 354 return EINVAL; 355 if (qat_uclo_create_batch_init_list( 356 handle, init_mem, ae, &obj_handle->umem_init_tab[ae])) 357 return EINVAL; 358 /* set the highest ustore address referenced */ 359 uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2; 360 aed = &obj_handle->ae_data[ae]; 361 for (i = 0; i < aed->slice_num; i++) { 362 if (aed->ae_slices[i].encap_image->uwords_num < uaddr) 363 aed->ae_slices[i].encap_image->uwords_num = uaddr; 364 } 365 return 0; 366 } 367 368 #define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000 369 static int 370 qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle, 371 struct icp_qat_uof_initmem *init_mem) 372 { 373 switch (init_mem->region) { 374 case ICP_QAT_UOF_LMEM_REGION: 375 if (qat_uclo_init_lmem_seg(handle, init_mem)) 376 return EINVAL; 377 break; 378 case ICP_QAT_UOF_UMEM_REGION: 379 if (qat_uclo_init_umem_seg(handle, init_mem)) 380 return EINVAL; 381 break; 382 default: 383 pr_err("QAT: initmem region error. region type=0x%x\n", 384 init_mem->region); 385 return EINVAL; 386 } 387 return 0; 388 } 389 390 static int 391 qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle, 392 struct icp_qat_uclo_encapme *image) 393 { 394 unsigned int i; 395 struct icp_qat_uclo_encap_page *page; 396 struct icp_qat_uof_image *uof_image; 397 unsigned char ae = 0; 398 unsigned char neigh_ae; 399 unsigned int ustore_size; 400 unsigned int patt_pos; 401 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 402 uint64_t *fill_data; 403 static unsigned int init[32] = { 0 }; 404 unsigned long ae_mask = handle->hal_handle->ae_mask; 405 406 uof_image = image->img_ptr; 407 /*if shared CS mode, the ustore size should be 2*ustore_phy_size*/ 408 fill_data = malloc(obj_handle->ustore_phy_size * 2 * sizeof(uint64_t), 409 M_QAT, 410 M_WAITOK | M_ZERO); 411 for (i = 0; i < obj_handle->ustore_phy_size * 2; i++) 412 memcpy(&fill_data[i], 413 &uof_image->fill_pattern, 414 sizeof(uint64_t)); 415 page = image->page; 416 417 for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) 418 { 419 unsigned long cfg_ae_mask = handle->cfg_ae_mask; 420 unsigned long ae_assigned = uof_image->ae_assigned; 421 const bool gen4 = 422 IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev))); 423 424 if (!test_bit(ae, &cfg_ae_mask)) 425 continue; 426 427 if (!test_bit(ae, &ae_assigned)) 428 continue; 429 430 if (obj_handle->ae_data[ae].shareable_ustore && (ae & 1) && 431 !gen4) { 432 qat_hal_get_scs_neigh_ae(ae, &neigh_ae); 433 434 if (test_bit(neigh_ae, &ae_assigned)) 435 continue; 436 } 437 438 ustore_size = obj_handle->ae_data[ae].eff_ustore_size; 439 patt_pos = page->beg_addr_p + page->micro_words_num; 440 if (obj_handle->ae_data[ae].shareable_ustore && !gen4) { 441 qat_hal_get_scs_neigh_ae(ae, &neigh_ae); 442 if (init[ae] == 0 && page->beg_addr_p != 0) { 443 qat_hal_wr_coalesce_uwords(handle, 444 (unsigned char)ae, 445 0, 446 page->beg_addr_p, 447 &fill_data[0]); 448 } 449 qat_hal_wr_coalesce_uwords( 450 handle, 451 (unsigned char)ae, 452 patt_pos, 453 ustore_size - patt_pos, 454 &fill_data[page->beg_addr_p]); 455 init[ae] = 1; 456 init[neigh_ae] = 1; 457 } else { 458 if (gen4 && (ae % 4 != 0)) 459 continue; 460 461 qat_hal_wr_uwords(handle, 462 (unsigned char)ae, 463 0, 464 page->beg_addr_p, 465 &fill_data[0]); 466 qat_hal_wr_uwords(handle, 467 (unsigned char)ae, 468 patt_pos, 469 ustore_size - patt_pos + 1, 470 &fill_data[page->beg_addr_p]); 471 } 472 } 473 free(fill_data, M_QAT); 474 return 0; 475 } 476 477 static int 478 qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle) 479 { 480 int i; 481 int ae = 0; 482 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 483 struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem; 484 unsigned long ae_mask = handle->hal_handle->ae_mask; 485 486 for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) { 487 if (initmem->num_in_bytes) { 488 if (qat_uclo_init_ae_memory(handle, initmem)) 489 return EINVAL; 490 } 491 initmem = 492 (struct icp_qat_uof_initmem 493 *)((uintptr_t)((uintptr_t)initmem + 494 sizeof(struct icp_qat_uof_initmem)) + 495 (sizeof(struct icp_qat_uof_memvar_attr) * 496 initmem->val_attr_num)); 497 } 498 499 for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) 500 { 501 if (qat_hal_batch_wr_lm(handle, 502 ae, 503 obj_handle->lm_init_tab[ae])) { 504 pr_err("QAT: fail to batch init lmem for AE %d\n", ae); 505 return EINVAL; 506 } 507 qat_uclo_cleanup_batch_init_list(handle, 508 &obj_handle->lm_init_tab[ae]); 509 qat_uclo_batch_wr_umem(handle, 510 ae, 511 obj_handle->umem_init_tab[ae]); 512 qat_uclo_cleanup_batch_init_list( 513 handle, &obj_handle->umem_init_tab[ae]); 514 } 515 return 0; 516 } 517 518 static void * 519 qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr, 520 char *chunk_id, 521 void *cur) 522 { 523 int i; 524 struct icp_qat_uof_chunkhdr *chunk_hdr = 525 (struct icp_qat_uof_chunkhdr *)((uintptr_t)obj_hdr + 526 sizeof(struct icp_qat_uof_objhdr)); 527 528 for (i = 0; i < obj_hdr->num_chunks; i++) { 529 if ((cur < (void *)&chunk_hdr[i]) && 530 !strncmp(chunk_hdr[i].chunk_id, 531 chunk_id, 532 ICP_QAT_UOF_OBJID_LEN)) { 533 return &chunk_hdr[i]; 534 } 535 } 536 return NULL; 537 } 538 539 static unsigned int 540 qat_uclo_calc_checksum(unsigned int reg, int ch) 541 { 542 int i; 543 unsigned int topbit = 1 << 0xF; 544 unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch); 545 546 reg ^= inbyte << 0x8; 547 for (i = 0; i < 0x8; i++) { 548 if (reg & topbit) 549 reg = (reg << 1) ^ 0x1021; 550 else 551 reg <<= 1; 552 } 553 return reg & 0xFFFF; 554 } 555 556 static unsigned int 557 qat_uclo_calc_str_checksum(const char *ptr, int num) 558 { 559 unsigned int chksum = 0; 560 561 if (ptr) 562 while (num--) 563 chksum = qat_uclo_calc_checksum(chksum, *ptr++); 564 return chksum; 565 } 566 567 static struct icp_qat_uclo_objhdr * 568 qat_uclo_map_chunk(char *buf, 569 struct icp_qat_uof_filehdr *file_hdr, 570 char *chunk_id) 571 { 572 struct icp_qat_uof_filechunkhdr *file_chunk; 573 struct icp_qat_uclo_objhdr *obj_hdr; 574 char *chunk; 575 int i; 576 577 file_chunk = (struct icp_qat_uof_filechunkhdr 578 *)(buf + sizeof(struct icp_qat_uof_filehdr)); 579 for (i = 0; i < file_hdr->num_chunks; i++) { 580 if (!strncmp(file_chunk->chunk_id, 581 chunk_id, 582 ICP_QAT_UOF_OBJID_LEN)) { 583 chunk = buf + file_chunk->offset; 584 if (file_chunk->checksum != 585 qat_uclo_calc_str_checksum(chunk, file_chunk->size)) 586 break; 587 obj_hdr = 588 malloc(sizeof(*obj_hdr), M_QAT, M_WAITOK | M_ZERO); 589 obj_hdr->file_buff = chunk; 590 obj_hdr->checksum = file_chunk->checksum; 591 obj_hdr->size = file_chunk->size; 592 return obj_hdr; 593 } 594 file_chunk++; 595 } 596 return NULL; 597 } 598 599 static unsigned int 600 qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj, 601 struct icp_qat_uof_image *image) 602 { 603 struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab; 604 struct icp_qat_uof_objtable *neigh_reg_tab; 605 struct icp_qat_uof_code_page *code_page; 606 607 code_page = 608 (struct icp_qat_uof_code_page *)((char *)image + 609 sizeof(struct icp_qat_uof_image)); 610 uc_var_tab = 611 (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof + 612 code_page->uc_var_tab_offset); 613 imp_var_tab = 614 (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof + 615 code_page->imp_var_tab_offset); 616 imp_expr_tab = 617 (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof + 618 code_page->imp_expr_tab_offset); 619 if (uc_var_tab->entry_num || imp_var_tab->entry_num || 620 imp_expr_tab->entry_num) { 621 pr_err("QAT: UOF can't contain imported variable to be parsed"); 622 return EINVAL; 623 } 624 neigh_reg_tab = 625 (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof + 626 code_page->neigh_reg_tab_offset); 627 if (neigh_reg_tab->entry_num) { 628 pr_err("QAT: UOF can't contain neighbor register table\n"); 629 return EINVAL; 630 } 631 if (image->numpages > 1) { 632 pr_err("QAT: UOF can't contain multiple pages\n"); 633 return EINVAL; 634 } 635 if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) { 636 pr_err("QAT: UOF can't use reloadable feature\n"); 637 return EFAULT; 638 } 639 return 0; 640 } 641 642 static void 643 qat_uclo_map_image_page(struct icp_qat_uof_encap_obj *encap_uof_obj, 644 struct icp_qat_uof_image *img, 645 struct icp_qat_uclo_encap_page *page) 646 { 647 struct icp_qat_uof_code_page *code_page; 648 struct icp_qat_uof_code_area *code_area; 649 struct icp_qat_uof_objtable *uword_block_tab; 650 struct icp_qat_uof_uword_block *uwblock; 651 int i; 652 653 code_page = 654 (struct icp_qat_uof_code_page *)((char *)img + 655 sizeof(struct icp_qat_uof_image)); 656 page->def_page = code_page->def_page; 657 page->page_region = code_page->page_region; 658 page->beg_addr_v = code_page->beg_addr_v; 659 page->beg_addr_p = code_page->beg_addr_p; 660 code_area = 661 (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof + 662 code_page->code_area_offset); 663 page->micro_words_num = code_area->micro_words_num; 664 uword_block_tab = 665 (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof + 666 code_area->uword_block_tab); 667 page->uwblock_num = uword_block_tab->entry_num; 668 uwblock = (struct icp_qat_uof_uword_block 669 *)((char *)uword_block_tab + 670 sizeof(struct icp_qat_uof_objtable)); 671 page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock; 672 for (i = 0; i < uword_block_tab->entry_num; i++) 673 page->uwblock[i].micro_words = 674 (uintptr_t)encap_uof_obj->beg_uof + uwblock[i].uword_offset; 675 } 676 677 static int 678 qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle, 679 struct icp_qat_uclo_encapme *ae_uimage, 680 int max_image) 681 { 682 int i, j; 683 struct icp_qat_uof_chunkhdr *chunk_hdr = NULL; 684 struct icp_qat_uof_image *image; 685 struct icp_qat_uof_objtable *ae_regtab; 686 struct icp_qat_uof_objtable *init_reg_sym_tab; 687 struct icp_qat_uof_objtable *sbreak_tab; 688 struct icp_qat_uof_encap_obj *encap_uof_obj = 689 &obj_handle->encap_uof_obj; 690 691 for (j = 0; j < max_image; j++) { 692 chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr, 693 ICP_QAT_UOF_IMAG, 694 chunk_hdr); 695 if (!chunk_hdr) 696 break; 697 image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof + 698 chunk_hdr->offset); 699 ae_regtab = 700 (struct icp_qat_uof_objtable *)(image->reg_tab_offset + 701 obj_handle->obj_hdr 702 ->file_buff); 703 ae_uimage[j].ae_reg_num = ae_regtab->entry_num; 704 ae_uimage[j].ae_reg = 705 (struct icp_qat_uof_ae_reg 706 *)(((char *)ae_regtab) + 707 sizeof(struct icp_qat_uof_objtable)); 708 init_reg_sym_tab = 709 (struct icp_qat_uof_objtable *)(image->init_reg_sym_tab + 710 obj_handle->obj_hdr 711 ->file_buff); 712 ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num; 713 ae_uimage[j].init_regsym = 714 (struct icp_qat_uof_init_regsym 715 *)(((char *)init_reg_sym_tab) + 716 sizeof(struct icp_qat_uof_objtable)); 717 sbreak_tab = (struct icp_qat_uof_objtable *)(image->sbreak_tab + 718 obj_handle->obj_hdr 719 ->file_buff); 720 ae_uimage[j].sbreak_num = sbreak_tab->entry_num; 721 ae_uimage[j].sbreak = 722 (struct icp_qat_uof_sbreak 723 *)(((char *)sbreak_tab) + 724 sizeof(struct icp_qat_uof_objtable)); 725 ae_uimage[j].img_ptr = image; 726 if (qat_uclo_check_image_compat(encap_uof_obj, image)) 727 goto out_err; 728 ae_uimage[j].page = 729 malloc(sizeof(struct icp_qat_uclo_encap_page), 730 M_QAT, 731 M_WAITOK | M_ZERO); 732 qat_uclo_map_image_page(encap_uof_obj, 733 image, 734 ae_uimage[j].page); 735 } 736 return j; 737 out_err: 738 for (i = 0; i < j; i++) 739 free(ae_uimage[i].page, M_QAT); 740 return 0; 741 } 742 743 static int 744 UcLo_checkTGroupList2X(struct icp_qat_fw_loader_handle *handle) 745 { 746 int i; 747 unsigned int swAe = 0; 748 unsigned int ii, jj; 749 struct icp_qat_uclo_aedata *ae_data0, *ae_datax; 750 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 751 752 for (i = 0; i < obj_handle->uimage_num; i++) { 753 struct icp_qat_uof_image *image = 754 obj_handle->ae_uimage[i].img_ptr; 755 if (image->numpages > 1) { 756 pr_err( 757 "Only 1 page is allowed in a UOF for CPM2X; We found %d in %s\n", 758 image->numpages, 759 qat_uclo_get_string(&obj_handle->str_table, 760 image->img_name)); 761 return EINVAL; 762 } 763 } 764 765 for (swAe = 0; 766 (swAe < obj_handle->ae_num) && (swAe < ICP_QAT_UCLO_MAX_AE); 767 swAe += AE_TG_NUM_CPM2X) { 768 if (!qat_hal_check_ae_active(handle, swAe)) { 769 continue; 770 } 771 772 for (ii = swAe; ii < (swAe + AE_TG_NUM_CPM2X); ii++) { 773 ae_data0 = &obj_handle->ae_data[ii]; 774 if (ae_data0->slice_num != 1) // not assigned 775 continue; 776 777 for (jj = ii + 1; jj < (swAe + AE_TG_NUM_CPM2X); jj++) { 778 ae_datax = &obj_handle->ae_data[jj]; 779 if (ae_datax->slice_num != 1) // not assigned 780 continue; 781 if (ae_data0->ae_slices[0] 782 .encap_image->img_ptr != 783 ae_datax->ae_slices[0] 784 .encap_image->img_ptr) { 785 pr_err("Only 1 list is allowed in a "); 786 pr_err("Tgroup for CPM2X;\n"); 787 pr_err("ME%d, %d is assigned", ii, jj); 788 pr_err(" different list files\n"); 789 return EINVAL; 790 } 791 } 792 } 793 } 794 795 return 0; 796 } 797 798 static int 799 qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae) 800 { 801 int i; 802 int ae = 0; 803 unsigned long ae_mask = handle->hal_handle->ae_mask; 804 unsigned long cfg_ae_mask = handle->cfg_ae_mask; 805 int mflag = 0; 806 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 807 808 for_each_set_bit(ae, &ae_mask, max_ae) 809 { 810 if (!test_bit(ae, &cfg_ae_mask)) 811 continue; 812 813 for (i = 0; i < obj_handle->uimage_num; i++) { 814 unsigned long ae_assigned = 815 obj_handle->ae_uimage[i].img_ptr->ae_assigned; 816 if (!test_bit(ae, &ae_assigned)) 817 continue; 818 mflag = 1; 819 if (qat_uclo_init_ae_data(obj_handle, ae, i)) 820 return EINVAL; 821 } 822 } 823 if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { 824 if (UcLo_checkTGroupList2X(handle)) { 825 return EINVAL; 826 } 827 } 828 if (!mflag) { 829 pr_err("QAT: uimage uses AE not set"); 830 return EINVAL; 831 } 832 return 0; 833 } 834 835 static struct icp_qat_uof_strtable * 836 qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr, 837 char *tab_name, 838 struct icp_qat_uof_strtable *str_table) 839 { 840 struct icp_qat_uof_chunkhdr *chunk_hdr; 841 842 chunk_hdr = 843 qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)obj_hdr->file_buff, 844 tab_name, 845 NULL); 846 if (chunk_hdr) { 847 int hdr_size; 848 849 memcpy(&str_table->table_len, 850 obj_hdr->file_buff + chunk_hdr->offset, 851 sizeof(str_table->table_len)); 852 hdr_size = (char *)&str_table->strings - (char *)str_table; 853 str_table->strings = (uintptr_t)obj_hdr->file_buff + 854 chunk_hdr->offset + hdr_size; 855 return str_table; 856 } 857 return NULL; 858 } 859 860 static void 861 qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj, 862 struct icp_qat_uclo_init_mem_table *init_mem_tab) 863 { 864 struct icp_qat_uof_chunkhdr *chunk_hdr; 865 866 chunk_hdr = 867 qat_uclo_find_chunk(encap_uof_obj->obj_hdr, ICP_QAT_UOF_IMEM, NULL); 868 if (chunk_hdr) { 869 memmove(&init_mem_tab->entry_num, 870 encap_uof_obj->beg_uof + chunk_hdr->offset, 871 sizeof(unsigned int)); 872 init_mem_tab->init_mem = 873 (struct icp_qat_uof_initmem *)(encap_uof_obj->beg_uof + 874 chunk_hdr->offset + 875 sizeof(unsigned int)); 876 } 877 } 878 879 static unsigned int 880 qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle) 881 { 882 switch (pci_get_device(GET_DEV(handle->accel_dev))) { 883 case ADF_DH895XCC_PCI_DEVICE_ID: 884 return ICP_QAT_AC_895XCC_DEV_TYPE; 885 case ADF_C62X_PCI_DEVICE_ID: 886 return ICP_QAT_AC_C62X_DEV_TYPE; 887 case ADF_C3XXX_PCI_DEVICE_ID: 888 return ICP_QAT_AC_C3XXX_DEV_TYPE; 889 case ADF_200XX_PCI_DEVICE_ID: 890 return ICP_QAT_AC_200XX_DEV_TYPE; 891 case ADF_C4XXX_PCI_DEVICE_ID: 892 return ICP_QAT_AC_C4XXX_DEV_TYPE; 893 case ADF_4XXX_PCI_DEVICE_ID: 894 case ADF_401XX_PCI_DEVICE_ID: 895 return ICP_QAT_AC_4XXX_A_DEV_TYPE; 896 default: 897 pr_err("QAT: unsupported device 0x%x\n", 898 pci_get_device(GET_DEV(handle->accel_dev))); 899 return 0; 900 } 901 } 902 903 static int 904 qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle) 905 { 906 unsigned int maj_ver, prod_type = obj_handle->prod_type; 907 908 if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->ac_dev_type)) { 909 pr_err("QAT: UOF type 0x%x doesn't match with platform 0x%x\n", 910 obj_handle->encap_uof_obj.obj_hdr->ac_dev_type, 911 prod_type); 912 return EINVAL; 913 } 914 maj_ver = obj_handle->prod_rev & 0xff; 915 if (obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver || 916 obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver) { 917 pr_err("QAT: UOF maj_ver 0x%x out of range\n", maj_ver); 918 return EINVAL; 919 } 920 return 0; 921 } 922 923 static int 924 qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle, 925 unsigned char ae, 926 unsigned char ctx_mask, 927 enum icp_qat_uof_regtype reg_type, 928 unsigned short reg_addr, 929 unsigned int value) 930 { 931 switch (reg_type) { 932 case ICP_GPA_ABS: 933 case ICP_GPB_ABS: 934 ctx_mask = 0; 935 return qat_hal_init_gpr( 936 handle, ae, ctx_mask, reg_type, reg_addr, value); 937 case ICP_GPA_REL: 938 case ICP_GPB_REL: 939 return qat_hal_init_gpr( 940 handle, ae, ctx_mask, reg_type, reg_addr, value); 941 case ICP_SR_ABS: 942 case ICP_DR_ABS: 943 case ICP_SR_RD_ABS: 944 case ICP_DR_RD_ABS: 945 ctx_mask = 0; 946 return qat_hal_init_rd_xfer( 947 handle, ae, ctx_mask, reg_type, reg_addr, value); 948 case ICP_SR_REL: 949 case ICP_DR_REL: 950 case ICP_SR_RD_REL: 951 case ICP_DR_RD_REL: 952 return qat_hal_init_rd_xfer( 953 handle, ae, ctx_mask, reg_type, reg_addr, value); 954 case ICP_SR_WR_ABS: 955 case ICP_DR_WR_ABS: 956 ctx_mask = 0; 957 return qat_hal_init_wr_xfer( 958 handle, ae, ctx_mask, reg_type, reg_addr, value); 959 case ICP_SR_WR_REL: 960 case ICP_DR_WR_REL: 961 return qat_hal_init_wr_xfer( 962 handle, ae, ctx_mask, reg_type, reg_addr, value); 963 case ICP_NEIGH_REL: 964 return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value); 965 default: 966 pr_err("QAT: UOF uses unsupported reg type 0x%x\n", reg_type); 967 return EFAULT; 968 } 969 return 0; 970 } 971 972 static int 973 qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle, 974 unsigned int ae, 975 struct icp_qat_uclo_encapme *encap_ae) 976 { 977 unsigned int i; 978 unsigned char ctx_mask; 979 struct icp_qat_uof_init_regsym *init_regsym; 980 981 if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) == 982 ICP_QAT_UCLO_MAX_CTX) 983 ctx_mask = 0xff; 984 else 985 ctx_mask = 0x55; 986 987 for (i = 0; i < encap_ae->init_regsym_num; i++) { 988 unsigned int exp_res; 989 990 init_regsym = &encap_ae->init_regsym[i]; 991 exp_res = init_regsym->value; 992 switch (init_regsym->init_type) { 993 case ICP_QAT_UOF_INIT_REG: 994 qat_uclo_init_reg(handle, 995 ae, 996 ctx_mask, 997 (enum icp_qat_uof_regtype) 998 init_regsym->reg_type, 999 (unsigned short)init_regsym->reg_addr, 1000 exp_res); 1001 break; 1002 case ICP_QAT_UOF_INIT_REG_CTX: 1003 /* check if ctx is appropriate for the ctxMode */ 1004 if (!((1 << init_regsym->ctx) & ctx_mask)) { 1005 pr_err("QAT: invalid ctx num = 0x%x\n", 1006 init_regsym->ctx); 1007 return EINVAL; 1008 } 1009 qat_uclo_init_reg( 1010 handle, 1011 ae, 1012 (unsigned char)(1 << init_regsym->ctx), 1013 (enum icp_qat_uof_regtype)init_regsym->reg_type, 1014 (unsigned short)init_regsym->reg_addr, 1015 exp_res); 1016 break; 1017 case ICP_QAT_UOF_INIT_EXPR: 1018 pr_err("QAT: INIT_EXPR feature not supported\n"); 1019 return EINVAL; 1020 case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP: 1021 pr_err("QAT: INIT_EXPR_ENDIAN_SWAP not supported\n"); 1022 return EINVAL; 1023 default: 1024 break; 1025 } 1026 } 1027 return 0; 1028 } 1029 1030 static int 1031 qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle) 1032 { 1033 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 1034 unsigned int s; 1035 unsigned int ae = 0; 1036 struct icp_qat_uclo_aedata *aed; 1037 unsigned long ae_mask = handle->hal_handle->ae_mask; 1038 1039 if (obj_handle->global_inited) 1040 return 0; 1041 if (obj_handle->init_mem_tab.entry_num) { 1042 if (qat_uclo_init_memory(handle)) { 1043 pr_err("QAT: initialize memory failed\n"); 1044 return EINVAL; 1045 } 1046 } 1047 1048 for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) 1049 { 1050 aed = &obj_handle->ae_data[ae]; 1051 for (s = 0; s < aed->slice_num; s++) { 1052 if (!aed->ae_slices[s].encap_image) 1053 continue; 1054 if (qat_uclo_init_reg_sym( 1055 handle, ae, aed->ae_slices[s].encap_image)) 1056 return EINVAL; 1057 } 1058 } 1059 obj_handle->global_inited = 1; 1060 return 0; 1061 } 1062 1063 static int 1064 qat_hal_set_modes(struct icp_qat_fw_loader_handle *handle, 1065 struct icp_qat_uclo_objhandle *obj_handle, 1066 unsigned char ae, 1067 struct icp_qat_uof_image *uof_image) 1068 { 1069 unsigned char nn_mode; 1070 char ae_mode = 0; 1071 1072 ae_mode = (char)ICP_QAT_CTX_MODE(uof_image->ae_mode); 1073 if (qat_hal_set_ae_ctx_mode(handle, ae, ae_mode)) { 1074 pr_err("QAT: qat_hal_set_ae_ctx_mode error\n"); 1075 return EFAULT; 1076 } 1077 1078 ae_mode = (char)ICP_QAT_SHARED_USTORE_MODE(uof_image->ae_mode); 1079 qat_hal_set_ae_scs_mode(handle, ae, ae_mode); 1080 if (!IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { 1081 nn_mode = ICP_QAT_NN_MODE(uof_image->ae_mode); 1082 1083 if (qat_hal_set_ae_nn_mode(handle, ae, nn_mode)) { 1084 pr_err("QAT: qat_hal_set_ae_nn_mode error\n"); 1085 return EFAULT; 1086 } 1087 } 1088 ae_mode = (char)ICP_QAT_LOC_MEM0_MODE(uof_image->ae_mode); 1089 if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0, ae_mode)) { 1090 pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n"); 1091 return EFAULT; 1092 } 1093 ae_mode = (char)ICP_QAT_LOC_MEM1_MODE(uof_image->ae_mode); 1094 if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1, ae_mode)) { 1095 pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n"); 1096 return EFAULT; 1097 } 1098 if (IS_QAT_GEN3_OR_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { 1099 ae_mode = (char)ICP_QAT_LOC_MEM2_MODE(uof_image->ae_mode); 1100 if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM2, ae_mode)) { 1101 pr_err("QAT: qat_hal_set_ae_lm_mode LMEM2 error\n"); 1102 return EFAULT; 1103 } 1104 ae_mode = (char)ICP_QAT_LOC_MEM3_MODE(uof_image->ae_mode); 1105 if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM3, ae_mode)) { 1106 pr_err("QAT: qat_hal_set_ae_lm_mode LMEM3 error\n"); 1107 return EFAULT; 1108 } 1109 ae_mode = (char)ICP_QAT_LOC_TINDEX_MODE(uof_image->ae_mode); 1110 qat_hal_set_ae_tindex_mode(handle, ae, ae_mode); 1111 } 1112 return 0; 1113 } 1114 1115 static int 1116 qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle) 1117 { 1118 int error; 1119 unsigned char s; 1120 unsigned char ae = 0; 1121 struct icp_qat_uof_image *uof_image; 1122 struct icp_qat_uclo_aedata *ae_data; 1123 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 1124 unsigned long ae_mask = handle->hal_handle->ae_mask; 1125 1126 for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) 1127 { 1128 unsigned long cfg_ae_mask = handle->cfg_ae_mask; 1129 1130 if (!test_bit(ae, &cfg_ae_mask)) 1131 continue; 1132 1133 ae_data = &obj_handle->ae_data[ae]; 1134 for (s = 0; s < min_t(unsigned int, 1135 ae_data->slice_num, 1136 ICP_QAT_UCLO_MAX_CTX); 1137 s++) { 1138 if (!obj_handle->ae_data[ae].ae_slices[s].encap_image) 1139 continue; 1140 uof_image = ae_data->ae_slices[s].encap_image->img_ptr; 1141 error = qat_hal_set_modes(handle, 1142 obj_handle, 1143 ae, 1144 uof_image); 1145 if (error) 1146 return error; 1147 } 1148 } 1149 return 0; 1150 } 1151 1152 static void 1153 qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle) 1154 { 1155 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 1156 struct icp_qat_uclo_encapme *image; 1157 int a; 1158 1159 for (a = 0; a < obj_handle->uimage_num; a++) { 1160 image = &obj_handle->ae_uimage[a]; 1161 image->uwords_num = 1162 image->page->beg_addr_p + image->page->micro_words_num; 1163 } 1164 } 1165 1166 static int 1167 qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle) 1168 { 1169 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 1170 unsigned int ae; 1171 1172 obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff; 1173 obj_handle->encap_uof_obj.obj_hdr = 1174 (struct icp_qat_uof_objhdr *)obj_handle->obj_hdr->file_buff; 1175 obj_handle->uword_in_bytes = 6; 1176 obj_handle->prod_type = qat_uclo_get_dev_type(handle); 1177 obj_handle->prod_rev = 1178 PID_MAJOR_REV | (PID_MINOR_REV & handle->hal_handle->revision_id); 1179 if (qat_uclo_check_uof_compat(obj_handle)) { 1180 pr_err("QAT: UOF incompatible\n"); 1181 return EINVAL; 1182 } 1183 obj_handle->uword_buf = malloc(UWORD_CPYBUF_SIZE * sizeof(uint64_t), 1184 M_QAT, 1185 M_WAITOK | M_ZERO); 1186 obj_handle->ustore_phy_size = 1187 (obj_handle->prod_type == ICP_QAT_AC_C4XXX_DEV_TYPE) ? 0x2000 : 1188 0x4000; 1189 if (!obj_handle->obj_hdr->file_buff || 1190 !qat_uclo_map_str_table(obj_handle->obj_hdr, 1191 ICP_QAT_UOF_STRT, 1192 &obj_handle->str_table)) { 1193 pr_err("QAT: UOF doesn't have effective images\n"); 1194 goto out_err; 1195 } 1196 obj_handle->uimage_num = 1197 qat_uclo_map_uimage(obj_handle, 1198 obj_handle->ae_uimage, 1199 ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX); 1200 if (!obj_handle->uimage_num) 1201 goto out_err; 1202 if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) { 1203 pr_err("QAT: Bad object\n"); 1204 goto out_check_uof_aemask_err; 1205 } 1206 qat_uclo_init_uword_num(handle); 1207 qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj, 1208 &obj_handle->init_mem_tab); 1209 if (qat_uclo_set_ae_mode(handle)) 1210 goto out_check_uof_aemask_err; 1211 return 0; 1212 out_check_uof_aemask_err: 1213 for (ae = 0; ae < obj_handle->uimage_num; ae++) 1214 free(obj_handle->ae_uimage[ae].page, M_QAT); 1215 out_err: 1216 free(obj_handle->uword_buf, M_QAT); 1217 obj_handle->uword_buf = NULL; 1218 return EFAULT; 1219 } 1220 1221 static int 1222 qat_uclo_map_suof_file_hdr(const struct icp_qat_fw_loader_handle *handle, 1223 const struct icp_qat_suof_filehdr *suof_ptr, 1224 int suof_size) 1225 { 1226 unsigned int check_sum = 0; 1227 unsigned int min_ver_offset = 0; 1228 struct icp_qat_suof_handle *suof_handle = handle->sobj_handle; 1229 1230 suof_handle->file_id = ICP_QAT_SUOF_FID; 1231 suof_handle->suof_buf = (const char *)suof_ptr; 1232 suof_handle->suof_size = suof_size; 1233 min_ver_offset = 1234 suof_size - offsetof(struct icp_qat_suof_filehdr, min_ver); 1235 check_sum = qat_uclo_calc_str_checksum((const char *)&suof_ptr->min_ver, 1236 min_ver_offset); 1237 if (check_sum != suof_ptr->check_sum) { 1238 pr_err("QAT: incorrect SUOF checksum\n"); 1239 return EINVAL; 1240 } 1241 suof_handle->check_sum = suof_ptr->check_sum; 1242 suof_handle->min_ver = suof_ptr->min_ver; 1243 suof_handle->maj_ver = suof_ptr->maj_ver; 1244 suof_handle->fw_type = suof_ptr->fw_type; 1245 return 0; 1246 } 1247 1248 static void 1249 qat_uclo_map_simg(struct icp_qat_fw_loader_handle *handle, 1250 struct icp_qat_suof_img_hdr *suof_img_hdr, 1251 struct icp_qat_suof_chunk_hdr *suof_chunk_hdr) 1252 { 1253 struct icp_qat_suof_handle *suof_handle = handle->sobj_handle; 1254 const struct icp_qat_simg_ae_mode *ae_mode; 1255 struct icp_qat_suof_objhdr *suof_objhdr; 1256 unsigned int device_id = pci_get_device(GET_DEV(handle->accel_dev)); 1257 1258 suof_img_hdr->simg_buf = 1259 (suof_handle->suof_buf + suof_chunk_hdr->offset + 1260 sizeof(*suof_objhdr)); 1261 suof_img_hdr->simg_len = 1262 ((struct icp_qat_suof_objhdr *)(uintptr_t)(suof_handle->suof_buf + 1263 suof_chunk_hdr->offset)) 1264 ->img_length; 1265 1266 suof_img_hdr->css_header = suof_img_hdr->simg_buf; 1267 suof_img_hdr->css_key = 1268 (suof_img_hdr->css_header + sizeof(struct icp_qat_css_hdr)); 1269 suof_img_hdr->css_signature = suof_img_hdr->css_key + 1270 ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id) + 1271 ICP_QAT_CSS_FWSK_EXPONENT_LEN(device_id); 1272 suof_img_hdr->css_simg = 1273 suof_img_hdr->css_signature + ICP_QAT_CSS_SIGNATURE_LEN(device_id); 1274 1275 ae_mode = (const struct icp_qat_simg_ae_mode *)(suof_img_hdr->css_simg); 1276 suof_img_hdr->ae_mask = ae_mode->ae_mask; 1277 suof_img_hdr->simg_name = (unsigned long)&ae_mode->simg_name; 1278 suof_img_hdr->appmeta_data = (unsigned long)&ae_mode->appmeta_data; 1279 suof_img_hdr->fw_type = ae_mode->fw_type; 1280 } 1281 1282 static void 1283 qat_uclo_map_suof_symobjs(struct icp_qat_suof_handle *suof_handle, 1284 struct icp_qat_suof_chunk_hdr *suof_chunk_hdr) 1285 { 1286 char **sym_str = (char **)&suof_handle->sym_str; 1287 unsigned int *sym_size = &suof_handle->sym_size; 1288 struct icp_qat_suof_strtable *str_table_obj; 1289 1290 *sym_size = *(unsigned int *)(uintptr_t)(suof_chunk_hdr->offset + 1291 suof_handle->suof_buf); 1292 *sym_str = 1293 (char *)(uintptr_t)(suof_handle->suof_buf + suof_chunk_hdr->offset + 1294 sizeof(str_table_obj->tab_length)); 1295 } 1296 1297 static int 1298 qat_uclo_check_simg_compat(struct icp_qat_fw_loader_handle *handle, 1299 struct icp_qat_suof_img_hdr *img_hdr) 1300 { 1301 const struct icp_qat_simg_ae_mode *img_ae_mode = NULL; 1302 unsigned int prod_rev, maj_ver, prod_type; 1303 1304 prod_type = qat_uclo_get_dev_type(handle); 1305 img_ae_mode = (const struct icp_qat_simg_ae_mode *)img_hdr->css_simg; 1306 prod_rev = 1307 PID_MAJOR_REV | (PID_MINOR_REV & handle->hal_handle->revision_id); 1308 if (img_ae_mode->dev_type != prod_type) { 1309 pr_err("QAT: incompatible product type %x\n", 1310 img_ae_mode->dev_type); 1311 return EINVAL; 1312 } 1313 maj_ver = prod_rev & 0xff; 1314 if (maj_ver > img_ae_mode->devmax_ver || 1315 maj_ver < img_ae_mode->devmin_ver) { 1316 pr_err("QAT: incompatible device maj_ver 0x%x\n", maj_ver); 1317 return EINVAL; 1318 } 1319 return 0; 1320 } 1321 1322 static void 1323 qat_uclo_del_suof(struct icp_qat_fw_loader_handle *handle) 1324 { 1325 struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle; 1326 1327 free(sobj_handle->img_table.simg_hdr, M_QAT); 1328 sobj_handle->img_table.simg_hdr = NULL; 1329 free(handle->sobj_handle, M_QAT); 1330 handle->sobj_handle = NULL; 1331 } 1332 1333 static void 1334 qat_uclo_tail_img(struct icp_qat_suof_img_hdr *suof_img_hdr, 1335 unsigned int img_id, 1336 unsigned int num_simgs) 1337 { 1338 struct icp_qat_suof_img_hdr img_header; 1339 1340 if ((img_id != num_simgs - 1) && img_id != ICP_QAT_UCLO_MAX_AE) { 1341 memcpy(&img_header, 1342 &suof_img_hdr[num_simgs - 1], 1343 sizeof(*suof_img_hdr)); 1344 memcpy(&suof_img_hdr[num_simgs - 1], 1345 &suof_img_hdr[img_id], 1346 sizeof(*suof_img_hdr)); 1347 memcpy(&suof_img_hdr[img_id], 1348 &img_header, 1349 sizeof(*suof_img_hdr)); 1350 } 1351 } 1352 1353 static int 1354 qat_uclo_map_suof(struct icp_qat_fw_loader_handle *handle, 1355 const struct icp_qat_suof_filehdr *suof_ptr, 1356 int suof_size) 1357 { 1358 struct icp_qat_suof_handle *suof_handle = handle->sobj_handle; 1359 struct icp_qat_suof_chunk_hdr *suof_chunk_hdr = NULL; 1360 struct icp_qat_suof_img_hdr *suof_img_hdr = NULL; 1361 int ret = 0, ae0_img = ICP_QAT_UCLO_MAX_AE, 1362 aeMax_img = ICP_QAT_UCLO_MAX_AE; 1363 unsigned int i = 0; 1364 struct icp_qat_suof_img_hdr img_header; 1365 1366 if (!suof_ptr || suof_size == 0) { 1367 pr_err("QAT: input parameter SUOF pointer/size is NULL\n"); 1368 return EINVAL; 1369 } 1370 if (qat_uclo_check_suof_format(suof_ptr)) 1371 return EINVAL; 1372 ret = qat_uclo_map_suof_file_hdr(handle, suof_ptr, suof_size); 1373 if (ret) 1374 return ret; 1375 suof_chunk_hdr = (struct icp_qat_suof_chunk_hdr *)((uintptr_t)suof_ptr + 1376 sizeof(*suof_ptr)); 1377 1378 qat_uclo_map_suof_symobjs(suof_handle, suof_chunk_hdr); 1379 suof_handle->img_table.num_simgs = suof_ptr->num_chunks - 1; 1380 1381 if (suof_handle->img_table.num_simgs != 0) { 1382 suof_img_hdr = malloc(suof_handle->img_table.num_simgs * 1383 sizeof(img_header), 1384 M_QAT, 1385 M_WAITOK | M_ZERO); 1386 suof_handle->img_table.simg_hdr = suof_img_hdr; 1387 } 1388 1389 for (i = 0; i < suof_handle->img_table.num_simgs; i++) { 1390 qat_uclo_map_simg(handle, 1391 &suof_img_hdr[i], 1392 &suof_chunk_hdr[1 + i]); 1393 ret = qat_uclo_check_simg_compat(handle, &suof_img_hdr[i]); 1394 if (ret) 1395 return ret; 1396 suof_img_hdr[i].ae_mask &= handle->cfg_ae_mask; 1397 if ((suof_img_hdr[i].ae_mask & 0x1) != 0) 1398 ae0_img = i; 1399 } 1400 1401 if (!IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { 1402 qat_uclo_tail_img(suof_img_hdr, 1403 ae0_img, 1404 suof_handle->img_table.num_simgs); 1405 } else { 1406 if (suof_handle->img_table.num_simgs == 1) 1407 return 0; 1408 qat_uclo_tail_img(suof_img_hdr, 1409 ae0_img, 1410 suof_handle->img_table.num_simgs - 1); 1411 for (i = 0; i < suof_handle->img_table.num_simgs; i++) { 1412 if ((suof_img_hdr[i].ae_mask & 1413 (0x1 << (handle->hal_handle->ae_max_num - 1))) != 1414 0) { 1415 aeMax_img = i; 1416 break; 1417 } 1418 } 1419 qat_uclo_tail_img(suof_img_hdr, 1420 aeMax_img, 1421 suof_handle->img_table.num_simgs); 1422 } 1423 return 0; 1424 } 1425 1426 #define ADD_ADDR(high, low) ((((uint64_t)high) << 32) + (low)) 1427 #define BITS_IN_DWORD 32 1428 1429 static int 1430 qat_uclo_auth_fw(struct icp_qat_fw_loader_handle *handle, 1431 struct icp_qat_fw_auth_desc *desc) 1432 { 1433 unsigned int fcu_sts, mem_cfg_err, retry = 0; 1434 unsigned int fcu_ctl_csr, fcu_sts_csr; 1435 unsigned int fcu_dram_hi_csr, fcu_dram_lo_csr; 1436 u64 bus_addr; 1437 1438 bus_addr = ADD_ADDR(desc->css_hdr_high, desc->css_hdr_low) - 1439 sizeof(struct icp_qat_auth_chunk); 1440 if (IS_QAT_GEN3_OR_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { 1441 fcu_ctl_csr = FCU_CONTROL_C4XXX; 1442 fcu_sts_csr = FCU_STATUS_C4XXX; 1443 fcu_dram_hi_csr = FCU_DRAM_ADDR_HI_C4XXX; 1444 fcu_dram_lo_csr = FCU_DRAM_ADDR_LO_C4XXX; 1445 } else { 1446 fcu_ctl_csr = FCU_CONTROL; 1447 fcu_sts_csr = FCU_STATUS; 1448 fcu_dram_hi_csr = FCU_DRAM_ADDR_HI; 1449 fcu_dram_lo_csr = FCU_DRAM_ADDR_LO; 1450 } 1451 SET_FCU_CSR(handle, fcu_dram_hi_csr, (bus_addr >> BITS_IN_DWORD)); 1452 SET_FCU_CSR(handle, fcu_dram_lo_csr, bus_addr); 1453 SET_FCU_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_AUTH); 1454 1455 do { 1456 pause_ms("adfstop", FW_AUTH_WAIT_PERIOD); 1457 fcu_sts = GET_FCU_CSR(handle, fcu_sts_csr); 1458 if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_FAIL) 1459 goto auth_fail; 1460 if (((fcu_sts >> FCU_STS_AUTHFWLD_POS) & 0x1)) 1461 if ((fcu_sts & FCU_AUTH_STS_MASK) == FCU_STS_VERI_DONE) 1462 return 0; 1463 } while (retry++ < FW_AUTH_MAX_RETRY); 1464 auth_fail: 1465 pr_err("QAT: authentication error (FCU_STATUS = 0x%x),retry = %d\n", 1466 fcu_sts & FCU_AUTH_STS_MASK, 1467 retry); 1468 if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) { 1469 mem_cfg_err = 1470 (GET_FCU_CSR(handle, FCU_STATUS1_C4XXX) & MEM_CFG_ERR_BIT); 1471 if (mem_cfg_err) 1472 pr_err("QAT: MEM_CFG_ERR\n"); 1473 } 1474 return EINVAL; 1475 } 1476 1477 static int 1478 qat_uclo_is_broadcast(struct icp_qat_fw_loader_handle *handle, int imgid) 1479 { 1480 struct icp_qat_suof_handle *sobj_handle; 1481 1482 if (!IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) 1483 return 0; 1484 1485 sobj_handle = (struct icp_qat_suof_handle *)handle->sobj_handle; 1486 if (handle->hal_handle->admin_ae_mask & 1487 sobj_handle->img_table.simg_hdr[imgid].ae_mask) 1488 return 0; 1489 1490 return 1; 1491 } 1492 1493 static int 1494 qat_uclo_broadcast_load_fw(struct icp_qat_fw_loader_handle *handle, 1495 struct icp_qat_fw_auth_desc *desc) 1496 { 1497 unsigned int i = 0; 1498 unsigned int fcuSts = 0, fcuAeBroadcastMask = 0; 1499 unsigned int retry = 0; 1500 unsigned int fcuStsCsr = 0; 1501 unsigned int fcuCtlCsr = 0; 1502 unsigned int loadedAes = 0; 1503 unsigned int device_id = pci_get_device(GET_DEV(handle->accel_dev)); 1504 1505 if (IS_QAT_GEN4(device_id)) { 1506 fcuCtlCsr = FCU_CONTROL_4XXX; 1507 fcuStsCsr = FCU_STATUS_4XXX; 1508 } else { 1509 pr_err("Uclo_BroadcastLoadFW only applicable for CPM20\n"); 1510 return EINVAL; 1511 } 1512 1513 for (i = 0; i < ICP_QAT_UCLO_MAX_AE; i++) { 1514 if (!test_bit(i, (unsigned long *)&handle->hal_handle->ae_mask)) 1515 continue; 1516 1517 if (qat_hal_check_ae_active(handle, (unsigned char)i)) { 1518 pr_err( 1519 "Uclo_BroadcastLoadFW error (invalid AE status)\n"); 1520 return EINVAL; 1521 } 1522 1523 if ((desc->ae_mask >> i) & 0x1) { 1524 fcuAeBroadcastMask |= 1 << i; 1525 } 1526 } 1527 1528 if (fcuAeBroadcastMask) { 1529 retry = 0; 1530 SET_FCU_CSR(handle, 1531 FCU_ME_BROADCAST_MASK_TYPE, 1532 fcuAeBroadcastMask); 1533 SET_FCU_CSR(handle, fcuCtlCsr, FCU_CTRL_CMD_LOAD); 1534 do { 1535 msleep(FW_AUTH_WAIT_PERIOD); 1536 fcuSts = GET_FCU_CSR(handle, fcuStsCsr); 1537 1538 if ((fcuSts & FCU_AUTH_STS_MASK) == FCU_STS_LOAD_FAIL) { 1539 pr_err( 1540 "Uclo_BroadcastLoadFW fail (fcu_status = 0x%x)\n", 1541 fcuSts & FCU_AUTH_STS_MASK); 1542 return EINVAL; 1543 } else if ((fcuSts & FCU_AUTH_STS_MASK) == 1544 FCU_STS_LOAD_DONE) { 1545 if (IS_QAT_GEN4(device_id)) 1546 loadedAes = 1547 GET_FCU_CSR(handle, 1548 FCU_AE_LOADED_4XXX); 1549 else 1550 loadedAes = 1551 (fcuSts >> FCU_LOADED_AE_POS); 1552 1553 if ((loadedAes & fcuAeBroadcastMask) == 1554 fcuAeBroadcastMask) 1555 break; 1556 } else if ((fcuSts & FCU_AUTH_STS_MASK) == 1557 FCU_STS_VERI_DONE) { 1558 SET_FCU_CSR(handle, 1559 fcuCtlCsr, 1560 FCU_CTRL_CMD_LOAD); 1561 } 1562 } while (retry++ < FW_BROADCAST_MAX_RETRY); 1563 if (retry > FW_BROADCAST_MAX_RETRY) { 1564 pr_err( 1565 "Uclo_BroadcastLoadFW fail(fcu_status = 0x%x),retry = %d\n", 1566 fcuSts & FCU_AUTH_STS_MASK, 1567 retry); 1568 return EINVAL; 1569 } 1570 } 1571 return 0; 1572 } 1573 1574 static int 1575 qat_uclo_simg_alloc(struct icp_qat_fw_loader_handle *handle, 1576 struct icp_firml_dram_desc *dram_desc, 1577 unsigned int size) 1578 { 1579 int ret; 1580 1581 ret = bus_dma_mem_create(&dram_desc->dram_mem, 1582 handle->accel_dev->dma_tag, 1583 1, 1584 BUS_SPACE_MAXADDR, 1585 size, 1586 0); 1587 if (ret != 0) 1588 return ret; 1589 dram_desc->dram_base_addr_v = dram_desc->dram_mem.dma_vaddr; 1590 dram_desc->dram_bus_addr = dram_desc->dram_mem.dma_baddr; 1591 dram_desc->dram_size = size; 1592 return 0; 1593 } 1594 1595 static void 1596 qat_uclo_simg_free(struct icp_qat_fw_loader_handle *handle, 1597 struct icp_firml_dram_desc *dram_desc) 1598 { 1599 if (handle && dram_desc && dram_desc->dram_base_addr_v) 1600 bus_dma_mem_free(&dram_desc->dram_mem); 1601 1602 if (dram_desc) 1603 explicit_bzero(dram_desc, sizeof(*dram_desc)); 1604 } 1605 1606 static int 1607 qat_uclo_map_auth_fw(struct icp_qat_fw_loader_handle *handle, 1608 const char *image, 1609 unsigned int size, 1610 struct icp_firml_dram_desc *img_desc, 1611 struct icp_qat_fw_auth_desc **desc) 1612 { 1613 const struct icp_qat_css_hdr *css_hdr = 1614 (const struct icp_qat_css_hdr *)image; 1615 struct icp_qat_fw_auth_desc *auth_desc; 1616 struct icp_qat_auth_chunk *auth_chunk; 1617 u64 virt_addr, bus_addr, virt_base; 1618 unsigned int length, simg_offset = sizeof(*auth_chunk); 1619 unsigned int device_id = pci_get_device(GET_DEV(handle->accel_dev)); 1620 1621 if (size <= ICP_QAT_AE_IMG_OFFSET(device_id)) { 1622 pr_err("QAT: error, input image size too small %d\n", size); 1623 return EINVAL; 1624 } 1625 1626 if (size > 1627 (ICP_QAT_AE_IMG_OFFSET(device_id) + ICP_QAT_CSS_MAX_IMAGE_LEN)) { 1628 pr_err("QAT: error, input image size overflow %d\n", size); 1629 return EINVAL; 1630 } 1631 1632 length = (css_hdr->fw_type == CSS_AE_FIRMWARE) ? 1633 ICP_QAT_CSS_AE_SIMG_LEN(device_id) + simg_offset : 1634 size + ICP_QAT_CSS_FWSK_PAD_LEN(device_id) + simg_offset; 1635 if (qat_uclo_simg_alloc(handle, img_desc, length)) { 1636 pr_err("QAT: error, allocate continuous dram fail\n"); 1637 return -ENOMEM; 1638 } 1639 1640 auth_chunk = img_desc->dram_base_addr_v; 1641 auth_chunk->chunk_size = img_desc->dram_size; 1642 auth_chunk->chunk_bus_addr = img_desc->dram_bus_addr; 1643 virt_base = (uintptr_t)img_desc->dram_base_addr_v + simg_offset; 1644 bus_addr = img_desc->dram_bus_addr + simg_offset; 1645 auth_desc = img_desc->dram_base_addr_v; 1646 auth_desc->css_hdr_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); 1647 auth_desc->css_hdr_low = (unsigned int)bus_addr; 1648 virt_addr = virt_base; 1649 1650 memcpy((void *)(uintptr_t)virt_addr, image, sizeof(*css_hdr)); 1651 /* pub key */ 1652 bus_addr = ADD_ADDR(auth_desc->css_hdr_high, auth_desc->css_hdr_low) + 1653 sizeof(*css_hdr); 1654 virt_addr = virt_addr + sizeof(*css_hdr); 1655 1656 auth_desc->fwsk_pub_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); 1657 auth_desc->fwsk_pub_low = (unsigned int)bus_addr; 1658 1659 memcpy((void *)(uintptr_t)virt_addr, 1660 (const void *)(image + sizeof(*css_hdr)), 1661 ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id)); 1662 /* padding */ 1663 explicit_bzero((void *)(uintptr_t)( 1664 virt_addr + ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id)), 1665 ICP_QAT_CSS_FWSK_PAD_LEN(device_id)); 1666 1667 /* exponent */ 1668 memcpy((void *)(uintptr_t)(virt_addr + 1669 ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id) + 1670 ICP_QAT_CSS_FWSK_PAD_LEN(device_id)), 1671 (const void *)(image + sizeof(*css_hdr) + 1672 ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id)), 1673 sizeof(unsigned int)); 1674 1675 /* signature */ 1676 bus_addr = ADD_ADDR(auth_desc->fwsk_pub_high, auth_desc->fwsk_pub_low) + 1677 ICP_QAT_CSS_FWSK_PUB_LEN(device_id); 1678 virt_addr = virt_addr + ICP_QAT_CSS_FWSK_PUB_LEN(device_id); 1679 auth_desc->signature_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); 1680 auth_desc->signature_low = (unsigned int)bus_addr; 1681 1682 memcpy((void *)(uintptr_t)virt_addr, 1683 (const void *)(image + sizeof(*css_hdr) + 1684 ICP_QAT_CSS_FWSK_MODULUS_LEN(device_id) + 1685 ICP_QAT_CSS_FWSK_EXPONENT_LEN(device_id)), 1686 ICP_QAT_CSS_SIGNATURE_LEN(device_id)); 1687 1688 bus_addr = 1689 ADD_ADDR(auth_desc->signature_high, auth_desc->signature_low) + 1690 ICP_QAT_CSS_SIGNATURE_LEN(device_id); 1691 virt_addr += ICP_QAT_CSS_SIGNATURE_LEN(device_id); 1692 1693 auth_desc->img_high = (unsigned int)(bus_addr >> BITS_IN_DWORD); 1694 auth_desc->img_low = (unsigned int)bus_addr; 1695 auth_desc->img_len = size - ICP_QAT_AE_IMG_OFFSET(device_id); 1696 memcpy((void *)(uintptr_t)virt_addr, 1697 (const void *)(image + ICP_QAT_AE_IMG_OFFSET(device_id)), 1698 auth_desc->img_len); 1699 virt_addr = virt_base; 1700 /* AE firmware */ 1701 if (((struct icp_qat_css_hdr *)(uintptr_t)virt_addr)->fw_type == 1702 CSS_AE_FIRMWARE) { 1703 auth_desc->img_ae_mode_data_high = auth_desc->img_high; 1704 auth_desc->img_ae_mode_data_low = auth_desc->img_low; 1705 bus_addr = ADD_ADDR(auth_desc->img_ae_mode_data_high, 1706 auth_desc->img_ae_mode_data_low) + 1707 sizeof(struct icp_qat_simg_ae_mode); 1708 1709 auth_desc->img_ae_init_data_high = 1710 (unsigned int)(bus_addr >> BITS_IN_DWORD); 1711 auth_desc->img_ae_init_data_low = (unsigned int)bus_addr; 1712 bus_addr += ICP_QAT_SIMG_AE_INIT_SEQ_LEN; 1713 auth_desc->img_ae_insts_high = 1714 (unsigned int)(bus_addr >> BITS_IN_DWORD); 1715 auth_desc->img_ae_insts_low = (unsigned int)bus_addr; 1716 virt_addr += sizeof(struct icp_qat_css_hdr) + 1717 ICP_QAT_CSS_FWSK_PUB_LEN(device_id) + 1718 ICP_QAT_CSS_SIGNATURE_LEN(device_id); 1719 auth_desc->ae_mask = 1720 ((struct icp_qat_simg_ae_mode *)virt_addr)->ae_mask & 1721 handle->cfg_ae_mask; 1722 } else { 1723 auth_desc->img_ae_insts_high = auth_desc->img_high; 1724 auth_desc->img_ae_insts_low = auth_desc->img_low; 1725 } 1726 *desc = auth_desc; 1727 return 0; 1728 } 1729 1730 static int 1731 qat_uclo_load_fw(struct icp_qat_fw_loader_handle *handle, 1732 struct icp_qat_fw_auth_desc *desc) 1733 { 1734 unsigned int i = 0; 1735 unsigned int fcu_sts; 1736 unsigned int fcu_sts_csr, fcu_ctl_csr; 1737 unsigned int loaded_aes = FCU_LOADED_AE_POS; 1738 unsigned long ae_mask = handle->hal_handle->ae_mask; 1739 1740 if (IS_QAT_GEN3_OR_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { 1741 fcu_ctl_csr = FCU_CONTROL_C4XXX; 1742 fcu_sts_csr = FCU_STATUS_C4XXX; 1743 1744 } else { 1745 fcu_ctl_csr = FCU_CONTROL; 1746 fcu_sts_csr = FCU_STATUS; 1747 } 1748 1749 for_each_set_bit(i, &ae_mask, handle->hal_handle->ae_max_num) 1750 { 1751 int retry = 0; 1752 1753 if (!((desc->ae_mask >> i) & 0x1)) 1754 continue; 1755 if (qat_hal_check_ae_active(handle, i)) { 1756 pr_err("QAT: AE %d is active\n", i); 1757 return EINVAL; 1758 } 1759 SET_FCU_CSR(handle, 1760 fcu_ctl_csr, 1761 (FCU_CTRL_CMD_LOAD | 1762 (IS_QAT_GEN4( 1763 pci_get_device(GET_DEV(handle->accel_dev))) ? 1764 (1 << FCU_CTRL_BROADCAST_POS) : 1765 0) | 1766 (i << FCU_CTRL_AE_POS))); 1767 1768 do { 1769 pause_ms("adfstop", FW_AUTH_WAIT_PERIOD); 1770 fcu_sts = GET_FCU_CSR(handle, fcu_sts_csr); 1771 if ((fcu_sts & FCU_AUTH_STS_MASK) == 1772 FCU_STS_LOAD_DONE) { 1773 loaded_aes = IS_QAT_GEN3_OR_GEN4(pci_get_device( 1774 GET_DEV(handle->accel_dev))) ? 1775 GET_FCU_CSR(handle, FCU_AE_LOADED_C4XXX) : 1776 (fcu_sts >> FCU_LOADED_AE_POS); 1777 if (loaded_aes & (1 << i)) 1778 break; 1779 } 1780 } while (retry++ < FW_AUTH_MAX_RETRY); 1781 if (retry > FW_AUTH_MAX_RETRY) { 1782 pr_err("QAT: firmware load failed timeout %x\n", retry); 1783 return EINVAL; 1784 } 1785 } 1786 return 0; 1787 } 1788 1789 static int 1790 qat_uclo_map_suof_obj(struct icp_qat_fw_loader_handle *handle, 1791 const void *addr_ptr, 1792 int mem_size) 1793 { 1794 struct icp_qat_suof_handle *suof_handle; 1795 1796 suof_handle = malloc(sizeof(*suof_handle), M_QAT, M_WAITOK | M_ZERO); 1797 handle->sobj_handle = suof_handle; 1798 if (qat_uclo_map_suof(handle, addr_ptr, mem_size)) { 1799 qat_uclo_del_suof(handle); 1800 pr_err("QAT: map SUOF failed\n"); 1801 return EINVAL; 1802 } 1803 return 0; 1804 } 1805 1806 int 1807 qat_uclo_wr_mimage(struct icp_qat_fw_loader_handle *handle, 1808 const void *addr_ptr, 1809 int mem_size) 1810 { 1811 struct icp_qat_fw_auth_desc *desc = NULL; 1812 struct icp_firml_dram_desc img_desc; 1813 int status = 0; 1814 1815 if (handle->fw_auth) { 1816 status = qat_uclo_map_auth_fw( 1817 handle, addr_ptr, mem_size, &img_desc, &desc); 1818 if (!status) 1819 status = qat_uclo_auth_fw(handle, desc); 1820 1821 qat_uclo_simg_free(handle, &img_desc); 1822 } else { 1823 if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) { 1824 device_printf( 1825 NULL, "QAT: PKE service is not allowed because "); 1826 device_printf(NULL, "MMP fw will not be loaded for "); 1827 device_printf(NULL, 1828 "device 0x%x", 1829 pci_get_device( 1830 GET_DEV(handle->accel_dev))); 1831 return status; 1832 } 1833 status = qat_uclo_wr_sram_by_words(handle, 1834 handle->hal_sram_offset, 1835 addr_ptr, 1836 mem_size); 1837 } 1838 return status; 1839 } 1840 1841 static int 1842 qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle, 1843 const void *addr_ptr, 1844 int mem_size) 1845 { 1846 struct icp_qat_uof_filehdr *filehdr; 1847 struct icp_qat_uclo_objhandle *objhdl; 1848 1849 objhdl = malloc(sizeof(*objhdl), M_QAT, M_WAITOK | M_ZERO); 1850 objhdl->obj_buf = malloc(mem_size, M_QAT, M_WAITOK); 1851 bcopy(addr_ptr, objhdl->obj_buf, mem_size); 1852 filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf; 1853 if (qat_uclo_check_uof_format(filehdr)) 1854 goto out_objhdr_err; 1855 objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, 1856 filehdr, 1857 ICP_QAT_UOF_OBJS); 1858 if (!objhdl->obj_hdr) { 1859 pr_err("QAT: object file chunk is null\n"); 1860 goto out_objhdr_err; 1861 } 1862 handle->obj_handle = objhdl; 1863 if (qat_uclo_parse_uof_obj(handle)) 1864 goto out_overlay_obj_err; 1865 return 0; 1866 1867 out_overlay_obj_err: 1868 handle->obj_handle = NULL; 1869 free(objhdl->obj_hdr, M_QAT); 1870 out_objhdr_err: 1871 free(objhdl->obj_buf, M_QAT); 1872 free(objhdl, M_QAT); 1873 return ENOMEM; 1874 } 1875 1876 static int 1877 qat_uclo_map_mof_file_hdr(struct icp_qat_fw_loader_handle *handle, 1878 const struct icp_qat_mof_file_hdr *mof_ptr, 1879 u32 mof_size) 1880 { 1881 unsigned int checksum = 0; 1882 unsigned int min_ver_offset = 0; 1883 struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle; 1884 1885 mobj_handle->file_id = ICP_QAT_MOF_FID; 1886 mobj_handle->mof_buf = (const char *)mof_ptr; 1887 mobj_handle->mof_size = mof_size; 1888 1889 min_ver_offset = 1890 mof_size - offsetof(struct icp_qat_mof_file_hdr, min_ver); 1891 checksum = qat_uclo_calc_str_checksum((const char *)&mof_ptr->min_ver, 1892 min_ver_offset); 1893 if (checksum != mof_ptr->checksum) { 1894 pr_err("QAT: incorrect MOF checksum\n"); 1895 return EINVAL; 1896 } 1897 mobj_handle->checksum = mof_ptr->checksum; 1898 mobj_handle->min_ver = mof_ptr->min_ver; 1899 mobj_handle->maj_ver = mof_ptr->maj_ver; 1900 return 0; 1901 } 1902 1903 void 1904 qat_uclo_del_mof(struct icp_qat_fw_loader_handle *handle) 1905 { 1906 struct icp_qat_mof_handle *mobj_handle = handle->mobj_handle; 1907 1908 free(mobj_handle->obj_table.obj_hdr, M_QAT); 1909 mobj_handle->obj_table.obj_hdr = NULL; 1910 free(handle->mobj_handle, M_QAT); 1911 handle->mobj_handle = NULL; 1912 } 1913 1914 static int 1915 qat_uclo_seek_obj_inside_mof(struct icp_qat_mof_handle *mobj_handle, 1916 const char *obj_name, 1917 const char **obj_ptr, 1918 unsigned int *obj_size) 1919 { 1920 unsigned int i; 1921 struct icp_qat_mof_objhdr *obj_hdr = mobj_handle->obj_table.obj_hdr; 1922 1923 for (i = 0; i < mobj_handle->obj_table.num_objs; i++) { 1924 if (!strncmp(obj_hdr[i].obj_name, 1925 obj_name, 1926 ICP_QAT_SUOF_OBJ_NAME_LEN)) { 1927 *obj_ptr = obj_hdr[i].obj_buf; 1928 *obj_size = obj_hdr[i].obj_size; 1929 break; 1930 } 1931 } 1932 1933 if (i >= mobj_handle->obj_table.num_objs) { 1934 pr_err("QAT: object %s is not found inside MOF\n", obj_name); 1935 return EFAULT; 1936 } 1937 return 0; 1938 } 1939 1940 static int 1941 qat_uclo_map_obj_from_mof(struct icp_qat_mof_handle *mobj_handle, 1942 struct icp_qat_mof_objhdr *mobj_hdr, 1943 struct icp_qat_mof_obj_chunkhdr *obj_chunkhdr) 1944 { 1945 if ((strncmp((char *)obj_chunkhdr->chunk_id, 1946 ICP_QAT_UOF_IMAG, 1947 ICP_QAT_MOF_OBJ_CHUNKID_LEN)) == 0) { 1948 mobj_hdr->obj_buf = 1949 (const char *)((unsigned long)obj_chunkhdr->offset + 1950 mobj_handle->uobjs_hdr); 1951 } else if ((strncmp((char *)(obj_chunkhdr->chunk_id), 1952 ICP_QAT_SUOF_IMAG, 1953 ICP_QAT_MOF_OBJ_CHUNKID_LEN)) == 0) { 1954 mobj_hdr->obj_buf = 1955 (const char *)((unsigned long)obj_chunkhdr->offset + 1956 mobj_handle->sobjs_hdr); 1957 1958 } else { 1959 pr_err("QAT: unsupported chunk id\n"); 1960 return EINVAL; 1961 } 1962 mobj_hdr->obj_size = (unsigned int)obj_chunkhdr->size; 1963 mobj_hdr->obj_name = 1964 (char *)(obj_chunkhdr->name + mobj_handle->sym_str); 1965 return 0; 1966 } 1967 1968 static int 1969 qat_uclo_map_objs_from_mof(struct icp_qat_mof_handle *mobj_handle) 1970 { 1971 struct icp_qat_mof_objhdr *mof_obj_hdr; 1972 const struct icp_qat_mof_obj_hdr *uobj_hdr; 1973 const struct icp_qat_mof_obj_hdr *sobj_hdr; 1974 struct icp_qat_mof_obj_chunkhdr *uobj_chunkhdr; 1975 struct icp_qat_mof_obj_chunkhdr *sobj_chunkhdr; 1976 unsigned int uobj_chunk_num = 0, sobj_chunk_num = 0; 1977 unsigned int *valid_chunks = 0; 1978 int ret, i; 1979 1980 uobj_hdr = (const struct icp_qat_mof_obj_hdr *)mobj_handle->uobjs_hdr; 1981 sobj_hdr = (const struct icp_qat_mof_obj_hdr *)mobj_handle->sobjs_hdr; 1982 if (uobj_hdr) 1983 uobj_chunk_num = uobj_hdr->num_chunks; 1984 if (sobj_hdr) 1985 sobj_chunk_num = sobj_hdr->num_chunks; 1986 1987 mof_obj_hdr = (struct icp_qat_mof_objhdr *) 1988 malloc((uobj_chunk_num + sobj_chunk_num) * sizeof(*mof_obj_hdr), 1989 M_QAT, 1990 M_WAITOK | M_ZERO); 1991 1992 mobj_handle->obj_table.obj_hdr = mof_obj_hdr; 1993 valid_chunks = &mobj_handle->obj_table.num_objs; 1994 uobj_chunkhdr = 1995 (struct icp_qat_mof_obj_chunkhdr *)((uintptr_t)uobj_hdr + 1996 sizeof(*uobj_hdr)); 1997 sobj_chunkhdr = 1998 (struct icp_qat_mof_obj_chunkhdr *)((uintptr_t)sobj_hdr + 1999 sizeof(*sobj_hdr)); 2000 2001 /* map uof objects */ 2002 for (i = 0; i < uobj_chunk_num; i++) { 2003 ret = qat_uclo_map_obj_from_mof(mobj_handle, 2004 &mof_obj_hdr[*valid_chunks], 2005 &uobj_chunkhdr[i]); 2006 if (ret) 2007 return ret; 2008 (*valid_chunks)++; 2009 } 2010 2011 /* map suof objects */ 2012 for (i = 0; i < sobj_chunk_num; i++) { 2013 ret = qat_uclo_map_obj_from_mof(mobj_handle, 2014 &mof_obj_hdr[*valid_chunks], 2015 &sobj_chunkhdr[i]); 2016 if (ret) 2017 return ret; 2018 (*valid_chunks)++; 2019 } 2020 2021 if ((uobj_chunk_num + sobj_chunk_num) != *valid_chunks) { 2022 pr_err("QAT: inconsistent UOF/SUOF chunk amount\n"); 2023 return EINVAL; 2024 } 2025 return 0; 2026 } 2027 2028 static void 2029 qat_uclo_map_mof_symobjs(struct icp_qat_mof_handle *mobj_handle, 2030 struct icp_qat_mof_chunkhdr *mof_chunkhdr) 2031 { 2032 char **sym_str = (char **)&mobj_handle->sym_str; 2033 unsigned int *sym_size = &mobj_handle->sym_size; 2034 struct icp_qat_mof_str_table *str_table_obj; 2035 2036 *sym_size = *(unsigned int *)(uintptr_t)(mof_chunkhdr->offset + 2037 mobj_handle->mof_buf); 2038 *sym_str = 2039 (char *)(uintptr_t)(mobj_handle->mof_buf + mof_chunkhdr->offset + 2040 sizeof(str_table_obj->tab_len)); 2041 } 2042 2043 static void 2044 qat_uclo_map_mof_chunk(struct icp_qat_mof_handle *mobj_handle, 2045 struct icp_qat_mof_chunkhdr *mof_chunkhdr) 2046 { 2047 if (!strncmp(mof_chunkhdr->chunk_id, 2048 ICP_QAT_MOF_SYM_OBJS, 2049 ICP_QAT_MOF_OBJ_ID_LEN)) 2050 qat_uclo_map_mof_symobjs(mobj_handle, mof_chunkhdr); 2051 else if (!strncmp(mof_chunkhdr->chunk_id, 2052 ICP_QAT_UOF_OBJS, 2053 ICP_QAT_MOF_OBJ_ID_LEN)) 2054 mobj_handle->uobjs_hdr = 2055 mobj_handle->mof_buf + (unsigned long)mof_chunkhdr->offset; 2056 else if (!strncmp(mof_chunkhdr->chunk_id, 2057 ICP_QAT_SUOF_OBJS, 2058 ICP_QAT_MOF_OBJ_ID_LEN)) 2059 mobj_handle->sobjs_hdr = 2060 mobj_handle->mof_buf + (unsigned long)mof_chunkhdr->offset; 2061 } 2062 2063 static int 2064 qat_uclo_check_mof_format(const struct icp_qat_mof_file_hdr *mof_hdr) 2065 { 2066 int maj = mof_hdr->maj_ver & 0xff; 2067 int min = mof_hdr->min_ver & 0xff; 2068 2069 if (mof_hdr->file_id != ICP_QAT_MOF_FID) { 2070 pr_err("QAT: invalid header 0x%x\n", mof_hdr->file_id); 2071 return EINVAL; 2072 } 2073 2074 if (mof_hdr->num_chunks <= 0x1) { 2075 pr_err("QAT: MOF chunk amount is incorrect\n"); 2076 return EINVAL; 2077 } 2078 if (maj != ICP_QAT_MOF_MAJVER || min != ICP_QAT_MOF_MINVER) { 2079 pr_err("QAT: bad MOF version, major 0x%x, minor 0x%x\n", 2080 maj, 2081 min); 2082 return EINVAL; 2083 } 2084 return 0; 2085 } 2086 2087 static int 2088 qat_uclo_map_mof_obj(struct icp_qat_fw_loader_handle *handle, 2089 const struct icp_qat_mof_file_hdr *mof_ptr, 2090 u32 mof_size, 2091 const char *obj_name, 2092 const char **obj_ptr, 2093 unsigned int *obj_size) 2094 { 2095 struct icp_qat_mof_handle *mobj_handle; 2096 struct icp_qat_mof_chunkhdr *mof_chunkhdr; 2097 unsigned short chunks_num; 2098 int ret; 2099 unsigned int i; 2100 2101 if (mof_ptr->file_id == ICP_QAT_UOF_FID || 2102 mof_ptr->file_id == ICP_QAT_SUOF_FID) { 2103 if (obj_ptr) 2104 *obj_ptr = (const char *)mof_ptr; 2105 if (obj_size) 2106 *obj_size = (unsigned int)mof_size; 2107 return 0; 2108 } 2109 if (qat_uclo_check_mof_format(mof_ptr)) 2110 return EINVAL; 2111 mobj_handle = malloc(sizeof(*mobj_handle), M_QAT, M_WAITOK | M_ZERO); 2112 handle->mobj_handle = mobj_handle; 2113 ret = qat_uclo_map_mof_file_hdr(handle, mof_ptr, mof_size); 2114 if (ret) 2115 return ret; 2116 mof_chunkhdr = (struct icp_qat_mof_chunkhdr *)((uintptr_t)mof_ptr + 2117 sizeof(*mof_ptr)); 2118 chunks_num = mof_ptr->num_chunks; 2119 /*Parse MOF file chunks*/ 2120 for (i = 0; i < chunks_num; i++) 2121 qat_uclo_map_mof_chunk(mobj_handle, &mof_chunkhdr[i]); 2122 /*All sym_objs uobjs and sobjs should be available*/ 2123 if (!mobj_handle->sym_str || 2124 (!mobj_handle->uobjs_hdr && !mobj_handle->sobjs_hdr)) 2125 return EINVAL; 2126 ret = qat_uclo_map_objs_from_mof(mobj_handle); 2127 if (ret) 2128 return ret; 2129 /*Seek specified uof object in MOF*/ 2130 ret = qat_uclo_seek_obj_inside_mof(mobj_handle, 2131 obj_name, 2132 obj_ptr, 2133 obj_size); 2134 if (ret) 2135 return ret; 2136 return 0; 2137 } 2138 2139 int 2140 qat_uclo_map_obj(struct icp_qat_fw_loader_handle *handle, 2141 const void *addr_ptr, 2142 u32 mem_size, 2143 const char *obj_name) 2144 { 2145 const char *obj_addr; 2146 u32 obj_size; 2147 int ret; 2148 2149 BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE > 2150 (sizeof(handle->hal_handle->ae_mask) * 8)); 2151 2152 if (!handle || !addr_ptr || mem_size < 24) 2153 return EINVAL; 2154 2155 if (obj_name) { 2156 ret = qat_uclo_map_mof_obj( 2157 handle, addr_ptr, mem_size, obj_name, &obj_addr, &obj_size); 2158 if (ret) 2159 return ret; 2160 } else { 2161 obj_addr = addr_ptr; 2162 obj_size = mem_size; 2163 } 2164 2165 return (handle->fw_auth) ? 2166 qat_uclo_map_suof_obj(handle, obj_addr, obj_size) : 2167 qat_uclo_map_uof_obj(handle, obj_addr, obj_size); 2168 } 2169 2170 void 2171 qat_uclo_del_obj(struct icp_qat_fw_loader_handle *handle) 2172 { 2173 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 2174 unsigned int a; 2175 unsigned long ae_mask = handle->hal_handle->ae_mask; 2176 2177 if (handle->mobj_handle) 2178 qat_uclo_del_mof(handle); 2179 if (handle->sobj_handle) 2180 qat_uclo_del_suof(handle); 2181 if (!obj_handle) 2182 return; 2183 2184 free(obj_handle->uword_buf, M_QAT); 2185 for (a = 0; a < obj_handle->uimage_num; a++) 2186 free(obj_handle->ae_uimage[a].page, M_QAT); 2187 2188 for_each_set_bit(a, &ae_mask, handle->hal_handle->ae_max_num) 2189 { 2190 qat_uclo_free_ae_data(&obj_handle->ae_data[a]); 2191 } 2192 2193 free(obj_handle->obj_hdr, M_QAT); 2194 free(obj_handle->obj_buf, M_QAT); 2195 free(obj_handle, M_QAT); 2196 handle->obj_handle = NULL; 2197 } 2198 2199 static void 2200 qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle, 2201 struct icp_qat_uclo_encap_page *encap_page, 2202 uint64_t *uword, 2203 unsigned int addr_p, 2204 unsigned int raddr, 2205 uint64_t fill) 2206 { 2207 uint64_t uwrd = 0; 2208 unsigned int i, addr; 2209 2210 if (!encap_page) { 2211 *uword = fill; 2212 return; 2213 } 2214 addr = (encap_page->page_region) ? raddr : addr_p; 2215 for (i = 0; i < encap_page->uwblock_num; i++) { 2216 if (addr >= encap_page->uwblock[i].start_addr && 2217 addr <= encap_page->uwblock[i].start_addr + 2218 encap_page->uwblock[i].words_num - 1) { 2219 addr -= encap_page->uwblock[i].start_addr; 2220 addr *= obj_handle->uword_in_bytes; 2221 memcpy(&uwrd, 2222 (void *)(((uintptr_t)encap_page->uwblock[i] 2223 .micro_words) + 2224 addr), 2225 obj_handle->uword_in_bytes); 2226 uwrd = uwrd & 0xbffffffffffull; 2227 } 2228 } 2229 *uword = uwrd; 2230 if (*uword == INVLD_UWORD) 2231 *uword = fill; 2232 } 2233 2234 static void 2235 qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle, 2236 struct icp_qat_uclo_encap_page *encap_page, 2237 unsigned int ae) 2238 { 2239 unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen; 2240 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 2241 uint64_t fill_pat; 2242 2243 /* load the page starting at appropriate ustore address */ 2244 /* get fill-pattern from an image -- they are all the same */ 2245 memcpy(&fill_pat, 2246 obj_handle->ae_uimage[0].img_ptr->fill_pattern, 2247 sizeof(uint64_t)); 2248 uw_physical_addr = encap_page->beg_addr_p; 2249 uw_relative_addr = 0; 2250 words_num = encap_page->micro_words_num; 2251 while (words_num) { 2252 if (words_num < UWORD_CPYBUF_SIZE) 2253 cpylen = words_num; 2254 else 2255 cpylen = UWORD_CPYBUF_SIZE; 2256 2257 /* load the buffer */ 2258 for (i = 0; i < cpylen; i++) 2259 qat_uclo_fill_uwords(obj_handle, 2260 encap_page, 2261 &obj_handle->uword_buf[i], 2262 uw_physical_addr + i, 2263 uw_relative_addr + i, 2264 fill_pat); 2265 2266 if (obj_handle->ae_data[ae].shareable_ustore && 2267 !IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) 2268 /* copy the buffer to ustore */ 2269 qat_hal_wr_coalesce_uwords(handle, 2270 (unsigned char)ae, 2271 uw_physical_addr, 2272 cpylen, 2273 obj_handle->uword_buf); 2274 else 2275 /* copy the buffer to ustore */ 2276 qat_hal_wr_uwords(handle, 2277 (unsigned char)ae, 2278 uw_physical_addr, 2279 cpylen, 2280 obj_handle->uword_buf); 2281 uw_physical_addr += cpylen; 2282 uw_relative_addr += cpylen; 2283 words_num -= cpylen; 2284 } 2285 } 2286 2287 static void 2288 qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle, 2289 struct icp_qat_uof_image *image) 2290 { 2291 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 2292 unsigned int ctx_mask, s; 2293 struct icp_qat_uclo_page *page; 2294 unsigned char ae = 0; 2295 int ctx; 2296 struct icp_qat_uclo_aedata *aed; 2297 unsigned long ae_mask = handle->hal_handle->ae_mask; 2298 2299 if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX) 2300 ctx_mask = 0xff; 2301 else 2302 ctx_mask = 0x55; 2303 /* load the default page and set assigned CTX PC 2304 * to the entrypoint address 2305 */ 2306 for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num) 2307 { 2308 unsigned long cfg_ae_mask = handle->cfg_ae_mask; 2309 unsigned long ae_assigned = image->ae_assigned; 2310 2311 if (!test_bit(ae, &cfg_ae_mask)) 2312 continue; 2313 2314 if (!test_bit(ae, &ae_assigned)) 2315 continue; 2316 2317 aed = &obj_handle->ae_data[ae]; 2318 /* find the slice to which this image is assigned */ 2319 for (s = 0; s < aed->slice_num; s++) { 2320 if (image->ctx_assigned & 2321 aed->ae_slices[s].ctx_mask_assigned) 2322 break; 2323 } 2324 if (s >= aed->slice_num) 2325 continue; 2326 page = aed->ae_slices[s].page; 2327 if (!page->encap_page->def_page) 2328 continue; 2329 qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae); 2330 2331 page = aed->ae_slices[s].page; 2332 for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) 2333 aed->ae_slices[s].cur_page[ctx] = 2334 (ctx_mask & (1 << ctx)) ? page : NULL; 2335 qat_hal_set_live_ctx(handle, 2336 (unsigned char)ae, 2337 image->ctx_assigned); 2338 qat_hal_set_pc(handle, 2339 (unsigned char)ae, 2340 image->ctx_assigned, 2341 image->entry_address); 2342 } 2343 } 2344 2345 static int 2346 qat_uclo_wr_suof_img(struct icp_qat_fw_loader_handle *handle) 2347 { 2348 unsigned int i; 2349 struct icp_qat_fw_auth_desc *desc = NULL; 2350 struct icp_firml_dram_desc img_desc; 2351 struct icp_qat_suof_handle *sobj_handle = handle->sobj_handle; 2352 struct icp_qat_suof_img_hdr *simg_hdr = sobj_handle->img_table.simg_hdr; 2353 2354 for (i = 0; i < sobj_handle->img_table.num_simgs; i++) { 2355 if (qat_uclo_map_auth_fw(handle, 2356 (const char *)simg_hdr[i].simg_buf, 2357 (unsigned int)(simg_hdr[i].simg_len), 2358 &img_desc, 2359 &desc)) 2360 goto wr_err; 2361 if (qat_uclo_auth_fw(handle, desc)) 2362 goto wr_err; 2363 if (qat_uclo_is_broadcast(handle, i)) { 2364 if (qat_uclo_broadcast_load_fw(handle, desc)) 2365 goto wr_err; 2366 } else { 2367 if (qat_uclo_load_fw(handle, desc)) 2368 goto wr_err; 2369 } 2370 qat_uclo_simg_free(handle, &img_desc); 2371 } 2372 2373 return 0; 2374 wr_err: 2375 qat_uclo_simg_free(handle, &img_desc); 2376 return -EINVAL; 2377 } 2378 2379 static int 2380 qat_uclo_wr_uof_img(struct icp_qat_fw_loader_handle *handle) 2381 { 2382 struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle; 2383 unsigned int i; 2384 2385 if (qat_uclo_init_globals(handle)) 2386 return EINVAL; 2387 for (i = 0; i < obj_handle->uimage_num; i++) { 2388 if (!obj_handle->ae_uimage[i].img_ptr) 2389 return EINVAL; 2390 if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i])) 2391 return EINVAL; 2392 qat_uclo_wr_uimage_page(handle, 2393 obj_handle->ae_uimage[i].img_ptr); 2394 } 2395 return 0; 2396 } 2397 2398 int 2399 qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle) 2400 { 2401 return (handle->fw_auth) ? qat_uclo_wr_suof_img(handle) : 2402 qat_uclo_wr_uof_img(handle); 2403 } 2404 2405 int 2406 qat_uclo_set_cfg_ae_mask(struct icp_qat_fw_loader_handle *handle, 2407 unsigned int cfg_ae_mask) 2408 { 2409 if (!cfg_ae_mask) 2410 return EINVAL; 2411 2412 handle->cfg_ae_mask = cfg_ae_mask; 2413 return 0; 2414 } 2415