/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_accel_devices.h"
#include "icp_qat_uclo.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_init_admin.h"
#include "adf_cfg_strings.h"
#include "adf_dev_err.h"
#include "adf_uio.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
#include <sys/mutex.h>
#include <linux/delay.h>

/* Mask used to check the CompressAndVerify capability bit */
#define DC_CNV_EXTENDED_CAPABILITY (0x01)

/* Mask used to check the CompressAndVerifyAndRecover capability bit */
#define DC_CNVNR_EXTENDED_CAPABILITY (0x100)

static LIST_HEAD(service_table);
static DEFINE_MUTEX(service_lock);

static void
adf_service_add(struct service_hndl *service)
{
	mutex_lock(&service_lock);
	list_add(&service->list, &service_table);
	mutex_unlock(&service_lock);
}

int
adf_service_register(struct service_hndl *service)
{
	memset(service->init_status, 0, sizeof(service->init_status));
	memset(service->start_status, 0, sizeof(service->start_status));
	adf_service_add(service);
	return 0;
}

static void
adf_service_remove(struct service_hndl *service)
{
	mutex_lock(&service_lock);
	list_del(&service->list);
	mutex_unlock(&service_lock);
}

int
adf_service_unregister(struct service_hndl *service)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(service->init_status); i++) {
		if (service->init_status[i] || service->start_status[i]) {
			pr_err("QAT: Could not remove active service [%d]\n",
			       i);
			return EFAULT;
		}
	}
	adf_service_remove(service);
	return 0;
}
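
/*
 * Minimal sketch (not compiled, illustration only) of how a subservice might
 * hook into the lifecycle events dispatched by this file.  The handler and
 * service names are hypothetical, and the handler's parameter type is assumed
 * from the event_hld() calls below; only the service_hndl fields used here
 * (name, event_hld) and adf_service_register()/adf_service_unregister() come
 * from this file.
 */
#if 0
static int
example_svc_event_handler(struct adf_accel_dev *accel_dev,
			  enum adf_event event)
{
	switch (event) {
	case ADF_EVENT_INIT:
		/* Allocate per-device state. */
		break;
	case ADF_EVENT_START:
		/* Rings and admin queues are usable from this point. */
		break;
	case ADF_EVENT_STOP:
	case ADF_EVENT_SHUTDOWN:
		/* Quiesce and release per-device state. */
		break;
	default:
		break;
	}
	return 0;
}

static struct service_hndl example_svc = {
	.name = "example_svc",
	.event_hld = example_svc_event_handler,
};

/* From a hypothetical module load/unload path:          */
/*	adf_service_register(&example_svc);              */
/*	adf_service_unregister(&example_svc);            */
#endif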

static int
adf_cfg_add_device_params(struct adf_accel_dev *accel_dev)
{
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char hw_version[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	char mmp_version[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	struct adf_hw_device_data *hw_data = NULL;
	unsigned long val;

	if (!accel_dev)
		return -EINVAL;

	hw_data = accel_dev->hw_device;

	if (adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC))
		goto err;

	snprintf(key, sizeof(key), ADF_DEV_MAX_BANKS);
	val = GET_MAX_BANKS(accel_dev);
	if (adf_cfg_add_key_value_param(
		accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC))
		goto err;

	snprintf(key, sizeof(key), ADF_DEV_CAPABILITIES_MASK);
	val = hw_data->accel_capabilities_mask;
	if (adf_cfg_add_key_value_param(
		accel_dev, ADF_GENERAL_SEC, key, (void *)val, ADF_HEX))
		goto err;

	snprintf(key, sizeof(key), ADF_DEV_PKG_ID);
	val = accel_dev->accel_id;
	if (adf_cfg_add_key_value_param(
		accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC))
		goto err;

	snprintf(key, sizeof(key), ADF_DEV_NODE_ID);
	val = dev_to_node(GET_DEV(accel_dev));
	if (adf_cfg_add_key_value_param(
		accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC))
		goto err;

	snprintf(key, sizeof(key), ADF_DEV_MAX_RINGS_PER_BANK);
	val = hw_data->num_rings_per_bank;
	if (adf_cfg_add_key_value_param(
		accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC))
		goto err;

	snprintf(key, sizeof(key), ADF_HW_REV_ID_KEY);
	snprintf(hw_version,
		 ADF_CFG_MAX_VAL_LEN_IN_BYTES,
		 "%d",
		 accel_dev->accel_pci_dev.revid);
	if (adf_cfg_add_key_value_param(
		accel_dev, ADF_GENERAL_SEC, key, (void *)hw_version, ADF_STR))
		goto err;

	snprintf(key, sizeof(key), ADF_MMP_VER_KEY);
	snprintf(mmp_version,
		 ADF_CFG_MAX_VAL_LEN_IN_BYTES,
		 "%d.%d.%d",
		 accel_dev->fw_versions.mmp_version_major,
		 accel_dev->fw_versions.mmp_version_minor,
		 accel_dev->fw_versions.mmp_version_patch);
	if (adf_cfg_add_key_value_param(
		accel_dev, ADF_GENERAL_SEC, key, (void *)mmp_version, ADF_STR))
		goto err;

	return 0;
err:
	device_printf(GET_DEV(accel_dev),
		      "Failed to add internal values to accel_dev cfg\n");
	return -EINVAL;
}

static int
adf_cfg_add_fw_version(struct adf_accel_dev *accel_dev)
{
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char fw_version[ADF_CFG_MAX_VAL_LEN_IN_BYTES];

	snprintf(key, sizeof(key), ADF_UOF_VER_KEY);
	snprintf(fw_version,
		 ADF_CFG_MAX_VAL_LEN_IN_BYTES,
		 "%d.%d.%d",
		 accel_dev->fw_versions.fw_version_major,
		 accel_dev->fw_versions.fw_version_minor,
		 accel_dev->fw_versions.fw_version_patch);
	if (adf_cfg_add_key_value_param(
		accel_dev, ADF_GENERAL_SEC, key, (void *)fw_version, ADF_STR))
		return EFAULT;

	return 0;
}

static int
adf_cfg_add_ext_params(struct adf_accel_dev *accel_dev)
{
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	unsigned long val;

	snprintf(key, sizeof(key), ADF_DC_EXTENDED_FEATURES);

	val = hw_data->extended_dc_capabilities;
	if (adf_cfg_add_key_value_param(
		accel_dev, ADF_GENERAL_SEC, key, (void *)val, ADF_HEX))
		return -EINVAL;

	return 0;
}
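
/*
 * Illustration only (not compiled): the value-passing convention used by the
 * helpers above.  As in adf_cfg_add_device_params(), ADF_DEC and ADF_STR take
 * a pointer to the value, while ADF_HEX encodes the value itself in the
 * pointer argument.  The key names below are hypothetical.
 */
#if 0
static int
example_add_cfg_entries(struct adf_accel_dev *accel_dev)
{
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	unsigned long val;

	/* Decimal: pass the address of the value. */
	snprintf(key, sizeof(key), "ExampleNumBanks");
	val = 8;
	if (adf_cfg_add_key_value_param(
		accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC))
		return EFAULT;

	/* Hex: pass the value itself, cast to a pointer. */
	snprintf(key, sizeof(key), "ExampleCapabilityMask");
	val = 0x1100;
	if (adf_cfg_add_key_value_param(
		accel_dev, ADF_GENERAL_SEC, key, (void *)val, ADF_HEX))
		return EFAULT;

	return 0;
}
#endif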

void
adf_error_notifier(uintptr_t arg)
{
	struct adf_accel_dev *accel_dev = (struct adf_accel_dev *)arg;
	struct service_hndl *service;
	struct list_head *list_itr;

	list_for_each(list_itr, &service_table)
	{
		service = list_entry(list_itr, struct service_hndl, list);
		if (service->event_hld(accel_dev, ADF_EVENT_ERROR))
			device_printf(GET_DEV(accel_dev),
				      "Failed to send error event to %s.\n",
				      service->name);
	}
}

/**
 * adf_set_ssm_wdtimer() - Initialize the slice hang watchdog timer.
 * @accel_dev: Pointer to acceleration device.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *misc_bar =
	    &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	struct resource *csr = misc_bar->virt_addr;
	u32 i;
	unsigned int mask;
	u32 clk_per_sec = hw_data->get_clock_speed(hw_data);
	u32 timer_val = ADF_WDT_TIMER_SYM_COMP_MS * (clk_per_sec / 1000);
	u32 timer_val_pke = ADF_GEN2_SSM_WDT_PKE_DEFAULT_VALUE;
	char timer_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 };

	/* Get Watch Dog Timer for CySym+Comp from the configuration */
	if (!adf_cfg_get_param_value(accel_dev,
				     ADF_GENERAL_SEC,
				     ADF_DEV_SSM_WDT_BULK,
				     (char *)timer_str)) {
		if (!compat_strtouint((char *)timer_str,
				      ADF_CFG_BASE_DEC,
				      &timer_val))
			/* Convert msec to CPP clocks */
			timer_val = timer_val * (clk_per_sec / 1000);
	}
	/* Get Watch Dog Timer for CyAsym from the configuration */
	if (!adf_cfg_get_param_value(accel_dev,
				     ADF_GENERAL_SEC,
				     ADF_DEV_SSM_WDT_PKE,
				     (char *)timer_str)) {
		if (!compat_strtouint((char *)timer_str,
				      ADF_CFG_BASE_DEC,
				      &timer_val_pke))
			/* Convert msec to CPP clocks */
			timer_val_pke = timer_val_pke * (clk_per_sec / 1000);
	}

	for (i = 0, mask = hw_data->accel_mask; mask; i++, mask >>= 1) {
		if (!(mask & 1))
			continue;
		/* Enable Watch Dog Timer for CySym + Comp */
		ADF_CSR_WR(csr, ADF_SSMWDT(i), timer_val);
		/* Enable Watch Dog Timer for CyAsym */
		ADF_CSR_WR(csr, ADF_SSMWDTPKE(i), timer_val_pke);
	}
	return 0;
}
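
/*
 * Worked example (sketch, not compiled): with a hypothetical 600 MHz CPP
 * clock, clk_per_sec / 1000 gives 600000 ticks per millisecond, so a 200 ms
 * watchdog value read from the configuration becomes
 * 200 * 600000 = 120000000 ticks written to ADF_SSMWDT(i).
 */
#if 0
static inline u32
example_ms_to_cpp_clocks(u32 ms, u32 clk_per_sec)
{
	/* Same conversion as adf_set_ssm_wdtimer() above. */
	return ms * (clk_per_sec / 1000);
}
#endif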

/**
 * adf_dev_init() - Init data structures and services for the given accel device
 * @accel_dev: Pointer to acceleration device.
 *
 * Initialize the ring data structures and the admin comms and arbitration
 * services.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_dev_init(struct adf_accel_dev *accel_dev)
{
	struct service_hndl *service;
	struct list_head *list_itr;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	char value[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	int ret = 0;

	sysctl_ctx_init(&accel_dev->sysctl_ctx);
	set_bit(ADF_STATUS_SYSCTL_CTX_INITIALISED, &accel_dev->status);

	if (!hw_data) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to init device - hw_data not set\n");
		return EFAULT;
	}

	if (hw_data->reset_hw_units)
		hw_data->reset_hw_units(accel_dev);

	if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
	    !accel_dev->is_vf) {
		device_printf(GET_DEV(accel_dev), "Device not configured\n");
		return EFAULT;
	}

	if (adf_init_etr_data(accel_dev)) {
		device_printf(GET_DEV(accel_dev), "Failed to initialize etr\n");
		return EFAULT;
	}

	if (hw_data->init_device && hw_data->init_device(accel_dev)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to initialize device\n");
		return EFAULT;
	}

	if (hw_data->init_accel_units && hw_data->init_accel_units(accel_dev)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to initialize accel_units\n");
		return EFAULT;
	}

	if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to initialize admin comms\n");
		return EFAULT;
	}

	if (hw_data->init_arb && hw_data->init_arb(accel_dev)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to initialize hw arbiter\n");
		return EFAULT;
	}

	if (hw_data->set_asym_rings_mask)
		hw_data->set_asym_rings_mask(accel_dev);

	hw_data->enable_ints(accel_dev);

	if (adf_ae_init(accel_dev)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to initialise Acceleration Engine\n");
		return EFAULT;
	}

	set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status);

	if (adf_ae_fw_load(accel_dev)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to load acceleration FW\n");
		return EFAULT;
	}
	set_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);

	if (hw_data->alloc_irq(accel_dev)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to allocate interrupts\n");
		return EFAULT;
	}
	set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);

	if (hw_data->init_ras && hw_data->init_ras(accel_dev)) {
		device_printf(GET_DEV(accel_dev), "Failed to init RAS\n");
		return EFAULT;
	}

	hw_data->enable_ints(accel_dev);

	hw_data->enable_error_correction(accel_dev);

	ret = hw_data->csr_info.pfvf_ops.enable_comms(accel_dev);
	if (ret)
		return ret;

	if (adf_cfg_add_device_params(accel_dev))
		return EFAULT;

	if (hw_data->add_pke_stats && hw_data->add_pke_stats(accel_dev))
		return EFAULT;

	if (hw_data->add_misc_error && hw_data->add_misc_error(accel_dev))
		return EFAULT;

	/*
	 * Subservice initialisation is divided into two stages: init and start.
	 * This is to facilitate any ordering dependencies between services
	 * prior to starting any of the accelerators.
	 */
	list_for_each(list_itr, &service_table)
	{
		service = list_entry(list_itr, struct service_hndl, list);
		if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
			device_printf(GET_DEV(accel_dev),
				      "Failed to initialise service %s\n",
				      service->name);
			return EFAULT;
		}
		set_bit(accel_dev->accel_id, service->init_status);
	}

	/* Read autoreset on error parameter */
	ret = adf_cfg_get_param_value(accel_dev,
				      ADF_GENERAL_SEC,
				      ADF_AUTO_RESET_ON_ERROR,
				      value);
	if (!ret) {
		if (compat_strtouint(value,
				     10,
				     &accel_dev->autoreset_on_error)) {
			device_printf(
			    GET_DEV(accel_dev),
			    "Failed converting %s to a decimal value\n",
			    ADF_AUTO_RESET_ON_ERROR);
			return EFAULT;
		}
	}

	return 0;
}

/**
 * adf_dev_start() - Start acceleration service for the given accel device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function notifies all the registered services that the acceleration device
 * is ready to be used.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_dev_start(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct service_hndl *service;
	struct list_head *list_itr;

	set_bit(ADF_STATUS_STARTING, &accel_dev->status);

	if (adf_devmgr_verify_id(&accel_dev->accel_id)) {
		device_printf(GET_DEV(accel_dev),
			      "QAT: Device %d not found\n",
			      accel_dev->accel_id);
		return ENODEV;
	}

	if (adf_ae_start(accel_dev)) {
		device_printf(GET_DEV(accel_dev), "AE Start Failed\n");
		return EFAULT;
	}
	set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);

	if (hw_data->send_admin_init(accel_dev)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to send init message\n");
		return EFAULT;
	}

	if (adf_cfg_add_fw_version(accel_dev)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to update configuration FW version\n");
		return EFAULT;
	}

	if (hw_data->measure_clock)
		hw_data->measure_clock(accel_dev);

	/*
	 * Set ssm watch dog timer for slice hang detection.
	 * Note! Not supported on devices older than C62x.
	 */
	if (hw_data->set_ssm_wdtimer && hw_data->set_ssm_wdtimer(accel_dev)) {
		device_printf(GET_DEV(accel_dev),
			      "QAT: Failed to set ssm watch dog timer\n");
		return EFAULT;
	}

	if (hw_data->int_timer_init && hw_data->int_timer_init(accel_dev)) {
		device_printf(GET_DEV(accel_dev),
			      "Failed to init heartbeat interrupt timer\n");
		return -EFAULT;
	}

	list_for_each(list_itr, &service_table)
	{
		service = list_entry(list_itr, struct service_hndl, list);
		if (service->event_hld(accel_dev, ADF_EVENT_START)) {
			device_printf(GET_DEV(accel_dev),
				      "Failed to start service %s\n",
				      service->name);
			return EFAULT;
		}
		set_bit(accel_dev->accel_id, service->start_status);
	}

	if (accel_dev->is_vf || !accel_dev->u1.pf.vf_info) {
		/* Register UIO devices */
		if (adf_uio_register(accel_dev)) {
			adf_uio_remove(accel_dev);
			device_printf(GET_DEV(accel_dev),
				      "Failed to register UIO devices\n");
			set_bit(ADF_STATUS_STARTING, &accel_dev->status);
			clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
			return ENODEV;
		}
	}

	if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status) &&
	    adf_cfg_add_ext_params(accel_dev))
		return EFAULT;

	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
	set_bit(ADF_STATUS_STARTED, &accel_dev->status);

	return 0;
}
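
/*
 * Sketch (not compiled) of the bring-up order implied by the comments above:
 * adf_dev_init() prepares data structures, firmware and services, then
 * adf_dev_start() notifies services and registers UIO.  The probe function
 * name is hypothetical and the error-path pairing (stop before shutdown) is
 * an assumption, reduced to the minimum for illustration.
 */
#if 0
static int
example_probe(struct adf_accel_dev *accel_dev)
{
	int ret;

	ret = adf_dev_init(accel_dev);	/* data structures, admin, arb */
	if (ret)
		goto out_shutdown;

	ret = adf_dev_start(accel_dev);	/* notify services, register UIO */
	if (ret)
		goto out_stop;

	return 0;

out_stop:
	adf_dev_stop(accel_dev);
out_shutdown:
	adf_dev_shutdown(accel_dev);
	return ret;
}
#endif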

/**
 * adf_dev_stop() - Stop acceleration service for the given accel device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function notifies all the registered services that the acceleration device
 * is shutting down.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_dev_stop(struct adf_accel_dev *accel_dev)
{
	struct service_hndl *service;
	struct list_head *list_itr;

	if (adf_devmgr_verify_id(&accel_dev->accel_id)) {
		device_printf(GET_DEV(accel_dev),
			      "QAT: Device %d not found\n",
			      accel_dev->accel_id);
		return ENODEV;
	}

	if (!adf_dev_started(accel_dev) &&
	    !test_bit(ADF_STATUS_STARTING, &accel_dev->status)) {
		return 0;
	}

	if (adf_dev_stop_notify_sync(accel_dev)) {
		device_printf(
		    GET_DEV(accel_dev),
		    "Waiting for device un-busy failed. Retries limit reached\n");
		return EBUSY;
	}

	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
	clear_bit(ADF_STATUS_STARTED, &accel_dev->status);

	if (accel_dev->hw_device->int_timer_exit)
		accel_dev->hw_device->int_timer_exit(accel_dev);

	list_for_each(list_itr, &service_table)
	{
		service = list_entry(list_itr, struct service_hndl, list);
		if (!test_bit(accel_dev->accel_id, service->start_status))
			continue;
		clear_bit(accel_dev->accel_id, service->start_status);
	}

	if (accel_dev->is_vf || !accel_dev->u1.pf.vf_info) {
		/* Remove UIO devices */
		adf_uio_remove(accel_dev);
	}

	if (test_bit(ADF_STATUS_AE_STARTED, &accel_dev->status)) {
		if (adf_ae_stop(accel_dev))
			device_printf(GET_DEV(accel_dev),
				      "Failed to stop AE\n");
		else
			clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
	}

	return 0;
}

/**
 * adf_dev_shutdown() - shutdown acceleration services and data structures
 * @accel_dev: Pointer to acceleration device
 *
 * Cleanup the ring data structures and the admin comms and arbitration
 * services.
 */
void
adf_dev_shutdown(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct service_hndl *service;
	struct list_head *list_itr;

	if (test_bit(ADF_STATUS_SYSCTL_CTX_INITIALISED, &accel_dev->status)) {
		sysctl_ctx_free(&accel_dev->sysctl_ctx);
		clear_bit(ADF_STATUS_SYSCTL_CTX_INITIALISED,
			  &accel_dev->status);
	}

	if (!hw_data) {
		device_printf(
		    GET_DEV(accel_dev),
		    "QAT: Failed to shutdown device - hw_data not set\n");
		return;
	}

	if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) {
		adf_ae_fw_release(accel_dev);
		clear_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
	}

	if (test_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status)) {
		if (adf_ae_shutdown(accel_dev))
			device_printf(GET_DEV(accel_dev),
				      "Failed to shutdown Accel Engine\n");
		else
			clear_bit(ADF_STATUS_AE_INITIALISED,
				  &accel_dev->status);
	}

	list_for_each(list_itr, &service_table)
	{
		service = list_entry(list_itr, struct service_hndl, list);
		if (!test_bit(accel_dev->accel_id, service->init_status))
			continue;
		if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
			device_printf(GET_DEV(accel_dev),
				      "Failed to shutdown service %s\n",
				      service->name);
		else
			clear_bit(accel_dev->accel_id, service->init_status);
	}

	hw_data->disable_iov(accel_dev);

	if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
		hw_data->free_irq(accel_dev);
		clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
	}

	/* Delete configuration only if not restarting */
	if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
		adf_cfg_del_all(accel_dev);

	if (hw_data->remove_pke_stats)
		hw_data->remove_pke_stats(accel_dev);

	if (hw_data->remove_misc_error)
		hw_data->remove_misc_error(accel_dev);

	if (hw_data->exit_ras)
		hw_data->exit_ras(accel_dev);

	if (hw_data->exit_arb)
		hw_data->exit_arb(accel_dev);

	if (hw_data->exit_admin_comms)
		hw_data->exit_admin_comms(accel_dev);

	if (hw_data->exit_accel_units)
		hw_data->exit_accel_units(accel_dev);

	adf_cleanup_etr_data(accel_dev);

	if (hw_data->restore_device)
		hw_data->restore_device(accel_dev);
}
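
/*
 * Sketch (not compiled) of the corresponding teardown order: stop first so
 * services and UIO mappings are quiesced, then shut down to release firmware,
 * interrupts and ring data.  The remove function name is hypothetical.
 */
#if 0
static void
example_remove(struct adf_accel_dev *accel_dev)
{
	adf_dev_stop(accel_dev);
	adf_dev_shutdown(accel_dev);
}
#endif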

/**
 * adf_dev_reset() - Reset acceleration service for the given accel device
 * @accel_dev: Pointer to acceleration device.
 * @mode: Specifies reset mode - synchronous or asynchronous.
 *
 * Function notifies all the registered services that the acceleration device
 * is resetting.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_dev_reset(struct adf_accel_dev *accel_dev, enum adf_dev_reset_mode mode)
{
	return adf_dev_aer_schedule_reset(accel_dev, mode);
}

int
adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
{
	struct service_hndl *service;
	struct list_head *list_itr;

	list_for_each(list_itr, &service_table)
	{
		service = list_entry(list_itr, struct service_hndl, list);
		if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
			device_printf(GET_DEV(accel_dev),
				      "Failed to restart service %s.\n",
				      service->name);
	}
	return 0;
}

int
adf_dev_restarting_notify_sync(struct adf_accel_dev *accel_dev)
{
	int times;

	adf_dev_restarting_notify(accel_dev);
	for (times = 0; times < ADF_STOP_RETRY; times++) {
		if (!adf_dev_in_use(accel_dev))
			break;
		dev_dbg(GET_DEV(accel_dev), "retry times=%d\n", times);
		pause_ms("adfstop", 100);
	}
	if (adf_dev_in_use(accel_dev)) {
		clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
		device_printf(GET_DEV(accel_dev),
			      "Device still in use during reset sequence.\n");
		return EBUSY;
	}

	return 0;
}

int
adf_dev_stop_notify_sync(struct adf_accel_dev *accel_dev)
{
	int times;
	struct service_hndl *service;
	struct list_head *list_itr;

	list_for_each(list_itr, &service_table)
	{
		service = list_entry(list_itr, struct service_hndl, list);
		if (service->event_hld(accel_dev, ADF_EVENT_STOP))
			device_printf(GET_DEV(accel_dev),
				      "Failed to stop service %s.\n",
				      service->name);
	}

	for (times = 0; times < ADF_STOP_RETRY; times++) {
		if (!adf_dev_in_use(accel_dev))
			break;
		dev_dbg(GET_DEV(accel_dev), "retry times=%d\n", times);
		pause_ms("adfstop", 100);
	}
	if (adf_dev_in_use(accel_dev)) {
		clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
		device_printf(GET_DEV(accel_dev),
			      "Device still in use during stop sequence.\n");
		return EBUSY;
	}

	return 0;
}

int
adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
{
	struct service_hndl *service;
	struct list_head *list_itr;

	list_for_each(list_itr, &service_table)
	{
		service = list_entry(list_itr, struct service_hndl, list);
		if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
			device_printf(GET_DEV(accel_dev),
				      "Failed to restart service %s.\n",
				      service->name);
	}
	return 0;
}
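
/*
 * Illustration only (not compiled): requesting a reset in either mode.  The
 * trigger function name is hypothetical, and the ADF_DEV_RESET_SYNC /
 * ADF_DEV_RESET_ASYNC enumerator names are assumed from the @mode parameter
 * documented for adf_dev_reset() above.
 */
#if 0
static void
example_trigger_reset(struct adf_accel_dev *accel_dev, bool wait)
{
	(void)adf_dev_reset(accel_dev,
			    wait ? ADF_DEV_RESET_SYNC : ADF_DEV_RESET_ASYNC);
}
#endif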