/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_accel_devices.h"
#include "icp_qat_uclo.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_init_admin.h"
#include "adf_cfg_strings.h"
#include "adf_dev_err.h"
#include "adf_uio.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
#include <sys/mutex.h>
#include <linux/delay.h>

/* Mask used to check the CompressAndVerify capability bit */
#define DC_CNV_EXTENDED_CAPABILITY (0x01)

/* Mask used to check the CompressAndVerifyAndRecover capability bit */
#define DC_CNVNR_EXTENDED_CAPABILITY (0x100)

static LIST_HEAD(service_table);
static DEFINE_MUTEX(service_lock);

static void
adf_service_add(struct service_hndl *service)
{
        mutex_lock(&service_lock);
        list_add(&service->list, &service_table);
        mutex_unlock(&service_lock);
}

int
adf_service_register(struct service_hndl *service)
{
        memset(service->init_status, 0, sizeof(service->init_status));
        memset(service->start_status, 0, sizeof(service->start_status));
        adf_service_add(service);
        return 0;
}

static void
adf_service_remove(struct service_hndl *service)
{
        mutex_lock(&service_lock);
        list_del(&service->list);
        mutex_unlock(&service_lock);
}

int
adf_service_unregister(struct service_hndl *service)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(service->init_status); i++) {
                if (service->init_status[i] || service->start_status[i]) {
                        pr_err("QAT: Could not remove active service [%d]\n",
                               i);
                        return EFAULT;
                }
        }
        adf_service_remove(service);
        return 0;
}
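
/*
 * Illustrative sketch only (not part of this file): how a QAT subservice
 * might hook into the event dispatch implemented below. The handler and
 * variable names are hypothetical; the only service_hndl fields assumed
 * are the ones this file already uses (event_hld, name, init_status,
 * start_status).
 */
#if 0
static int
example_event_handler(struct adf_accel_dev *accel_dev, enum adf_event event)
{
        switch (event) {
        case ADF_EVENT_INIT:
                /* Allocate per-device state; the device is not started yet. */
                return 0;
        case ADF_EVENT_START:
                /* Rings and admin queue are usable from this point on. */
                return 0;
        case ADF_EVENT_STOP:
        case ADF_EVENT_SHUTDOWN:
                /* Quiesce and release per-device state. */
                return 0;
        default:
                return 0;
        }
}

static struct service_hndl example_service = {
        .event_hld = example_event_handler,
        .name = "example_service",
};

/* Typically registered once from the subservice's load path: */
/*      adf_service_register(&example_service); */
/* ... and removed only after all devices report it inactive: */
/*      adf_service_unregister(&example_service); */
#endif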

static int
adf_cfg_add_device_params(struct adf_accel_dev *accel_dev)
{
        char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
        char hw_version[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
        char mmp_version[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
        struct adf_hw_device_data *hw_data = NULL;
        unsigned long val;

        if (!accel_dev)
                return -EINVAL;

        hw_data = accel_dev->hw_device;

        if (adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC))
                goto err;

        snprintf(key, sizeof(key), ADF_DEV_MAX_BANKS);
        val = GET_MAX_BANKS(accel_dev);
        if (adf_cfg_add_key_value_param(
                accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC))
                goto err;

        snprintf(key, sizeof(key), ADF_DEV_CAPABILITIES_MASK);
        val = hw_data->accel_capabilities_mask;
        if (adf_cfg_add_key_value_param(
                accel_dev, ADF_GENERAL_SEC, key, (void *)val, ADF_HEX))
                goto err;

        snprintf(key, sizeof(key), ADF_DEV_PKG_ID);
        val = accel_dev->accel_id;
        if (adf_cfg_add_key_value_param(
                accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC))
                goto err;

        snprintf(key, sizeof(key), ADF_DEV_NODE_ID);
        val = dev_to_node(GET_DEV(accel_dev));
        if (adf_cfg_add_key_value_param(
                accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC))
                goto err;

        snprintf(key, sizeof(key), ADF_DEV_MAX_RINGS_PER_BANK);
        val = hw_data->num_rings_per_bank;
        if (adf_cfg_add_key_value_param(
                accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC))
                goto err;

        snprintf(key, sizeof(key), ADF_HW_REV_ID_KEY);
        snprintf(hw_version,
                 ADF_CFG_MAX_VAL_LEN_IN_BYTES,
                 "%d",
                 accel_dev->accel_pci_dev.revid);
        if (adf_cfg_add_key_value_param(
                accel_dev, ADF_GENERAL_SEC, key, (void *)hw_version, ADF_STR))
                goto err;

        snprintf(key, sizeof(key), ADF_MMP_VER_KEY);
        snprintf(mmp_version,
                 ADF_CFG_MAX_VAL_LEN_IN_BYTES,
                 "%d.%d.%d",
                 accel_dev->fw_versions.mmp_version_major,
                 accel_dev->fw_versions.mmp_version_minor,
                 accel_dev->fw_versions.mmp_version_patch);
        if (adf_cfg_add_key_value_param(
                accel_dev, ADF_GENERAL_SEC, key, (void *)mmp_version, ADF_STR))
                goto err;

        return 0;
err:
        device_printf(GET_DEV(accel_dev),
                      "Failed to add internal values to accel_dev cfg\n");
        return -EINVAL;
}

static int
adf_cfg_add_fw_version(struct adf_accel_dev *accel_dev)
{
        char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
        char fw_version[ADF_CFG_MAX_VAL_LEN_IN_BYTES];

        snprintf(key, sizeof(key), ADF_UOF_VER_KEY);
        snprintf(fw_version,
                 ADF_CFG_MAX_VAL_LEN_IN_BYTES,
                 "%d.%d.%d",
                 accel_dev->fw_versions.fw_version_major,
                 accel_dev->fw_versions.fw_version_minor,
                 accel_dev->fw_versions.fw_version_patch);
        if (adf_cfg_add_key_value_param(
                accel_dev, ADF_GENERAL_SEC, key, (void *)fw_version, ADF_STR))
                return EFAULT;

        return 0;
}

static int
adf_cfg_add_ext_params(struct adf_accel_dev *accel_dev)
{
        char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        unsigned long val;

        snprintf(key, sizeof(key), ADF_DC_EXTENDED_FEATURES);

        val = hw_data->extended_dc_capabilities;
        if (adf_cfg_add_key_value_param(
                accel_dev, ADF_GENERAL_SEC, key, (void *)val, ADF_HEX))
                return -EINVAL;

        return 0;
}

void
adf_error_notifier(uintptr_t arg)
{
        struct adf_accel_dev *accel_dev = (struct adf_accel_dev *)arg;
        struct service_hndl *service;
        struct list_head *list_itr;

        list_for_each(list_itr, &service_table)
        {
                service = list_entry(list_itr, struct service_hndl, list);
                if (service->event_hld(accel_dev, ADF_EVENT_ERROR))
                        device_printf(GET_DEV(accel_dev),
                                      "Failed to send error event to %s.\n",
                                      service->name);
        }
}

/**
 * adf_set_ssm_wdtimer() - Initialize the slice hang watchdog timer.
 * @accel_dev: Pointer to acceleration device.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
{
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct adf_bar *misc_bar =
            &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
        struct resource *csr = misc_bar->virt_addr;
        u32 i;
        unsigned int mask;
        u32 clk_per_sec = hw_data->get_clock_speed(hw_data);
        u32 timer_val = ADF_WDT_TIMER_SYM_COMP_MS * (clk_per_sec / 1000);
        u32 timer_val_pke = ADF_GEN2_SSM_WDT_PKE_DEFAULT_VALUE;
        char timer_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 };

        /* Get Watch Dog Timer for CySym+Comp from the configuration */
        if (!adf_cfg_get_param_value(accel_dev,
                                     ADF_GENERAL_SEC,
                                     ADF_DEV_SSM_WDT_BULK,
                                     (char *)timer_str)) {
                if (!compat_strtouint((char *)timer_str,
                                      ADF_CFG_BASE_DEC,
                                      &timer_val))
                        /* Convert msec to CPP clocks */
                        timer_val = timer_val * (clk_per_sec / 1000);
        }
        /* Get Watch Dog Timer for CyAsym from the configuration */
        if (!adf_cfg_get_param_value(accel_dev,
                                     ADF_GENERAL_SEC,
                                     ADF_DEV_SSM_WDT_PKE,
                                     (char *)timer_str)) {
                if (!compat_strtouint((char *)timer_str,
                                      ADF_CFG_BASE_DEC,
                                      &timer_val_pke))
                        /* Convert msec to CPP clocks */
                        timer_val_pke = timer_val_pke * (clk_per_sec / 1000);
        }

        for (i = 0, mask = hw_data->accel_mask; mask; i++, mask >>= 1) {
                if (!(mask & 1))
                        continue;
                /* Enable Watch Dog Timer for CySym + Comp */
                ADF_CSR_WR(csr, ADF_SSMWDT(i), timer_val);
                /* Enable Watch Dog Timer for CyAsym */
                ADF_CSR_WR(csr, ADF_SSMWDTPKE(i), timer_val_pke);
        }
        return 0;
}
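
/*
 * Illustrative sketch only (not part of this file): the watchdog CSRs above
 * are programmed in CPP clock ticks, so a millisecond budget taken from the
 * configuration is scaled by the device clock exactly as adf_set_ssm_wdtimer()
 * does. The helper name below is hypothetical.
 */
#if 0
static u32
example_ms_to_cpp_ticks(u32 timeout_ms, u32 clk_per_sec)
{
        /* e.g. a 200 ms budget at 600 MHz -> 200 * 600000 = 120000000 ticks */
        return timeout_ms * (clk_per_sec / 1000);
}
#endif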

/**
 * adf_dev_init() - Init data structures and services for the given accel device
 * @accel_dev: Pointer to acceleration device.
 *
 * Initialize the ring data structures and the admin comms and arbitration
 * services.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_dev_init(struct adf_accel_dev *accel_dev)
{
        struct service_hndl *service;
        struct list_head *list_itr;
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        char value[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
        int ret = 0;

        sysctl_ctx_init(&accel_dev->sysctl_ctx);
        set_bit(ADF_STATUS_SYSCTL_CTX_INITIALISED, &accel_dev->status);

        if (!hw_data) {
                device_printf(GET_DEV(accel_dev),
                              "Failed to init device - hw_data not set\n");
                return EFAULT;
        }
        if (hw_data->reset_hw_units)
                hw_data->reset_hw_units(accel_dev);

        if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
            !accel_dev->is_vf) {
                device_printf(GET_DEV(accel_dev), "Device not configured\n");
                return EFAULT;
        }

        if (adf_init_etr_data(accel_dev)) {
                device_printf(GET_DEV(accel_dev), "Failed to initialize etr\n");
                return EFAULT;
        }

        if (hw_data->init_device && hw_data->init_device(accel_dev)) {
                device_printf(GET_DEV(accel_dev),
                              "Failed to initialize device\n");
                return EFAULT;
        }

        if (hw_data->init_accel_units && hw_data->init_accel_units(accel_dev)) {
                device_printf(GET_DEV(accel_dev),
                              "Failed to initialize accel_units\n");
                return EFAULT;
        }

        if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) {
                device_printf(GET_DEV(accel_dev),
                              "Failed to initialize admin comms\n");
                return EFAULT;
        }

        if (hw_data->init_arb && hw_data->init_arb(accel_dev)) {
                device_printf(GET_DEV(accel_dev),
                              "Failed to initialize hw arbiter\n");
                return EFAULT;
        }

        if (hw_data->set_asym_rings_mask)
                hw_data->set_asym_rings_mask(accel_dev);

        hw_data->enable_ints(accel_dev);

        if (adf_ae_init(accel_dev)) {
                device_printf(GET_DEV(accel_dev),
                              "Failed to initialise Acceleration Engine\n");
                return EFAULT;
        }

        set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status);

        if (adf_ae_fw_load(accel_dev)) {
                device_printf(GET_DEV(accel_dev),
                              "Failed to load acceleration FW\n");
                return EFAULT;
        }
        set_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);

        if (hw_data->alloc_irq(accel_dev)) {
                device_printf(GET_DEV(accel_dev),
                              "Failed to allocate interrupts\n");
                return EFAULT;
        }
        set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);

        if (hw_data->init_ras && hw_data->init_ras(accel_dev)) {
                device_printf(GET_DEV(accel_dev), "Failed to init RAS\n");
                return EFAULT;
        }

        hw_data->enable_ints(accel_dev);

        hw_data->enable_error_correction(accel_dev);

        ret = hw_data->csr_info.pfvf_ops.enable_comms(accel_dev);
        if (ret)
                return ret;

        if (adf_cfg_add_device_params(accel_dev))
                return EFAULT;

        if (hw_data->add_pke_stats && hw_data->add_pke_stats(accel_dev))
                return EFAULT;

        if (hw_data->add_misc_error && hw_data->add_misc_error(accel_dev))
                return EFAULT;
        /*
         * Subservice initialisation is divided into two stages: init and start.
         * This is to facilitate any ordering dependencies between services
         * prior to starting any of the accelerators.
         */
        list_for_each(list_itr, &service_table)
        {
                service = list_entry(list_itr, struct service_hndl, list);
                if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
                        device_printf(GET_DEV(accel_dev),
                                      "Failed to initialise service %s\n",
                                      service->name);
                        return EFAULT;
                }
                set_bit(accel_dev->accel_id, service->init_status);
        }

        /* Read autoreset on error parameter */
        ret = adf_cfg_get_param_value(accel_dev,
                                      ADF_GENERAL_SEC,
                                      ADF_AUTO_RESET_ON_ERROR,
                                      value);
        if (!ret) {
                if (compat_strtouint(value,
                                     10,
                                     &accel_dev->autoreset_on_error)) {
                        device_printf(
                            GET_DEV(accel_dev),
                            "Failed converting %s to a decimal value\n",
                            ADF_AUTO_RESET_ON_ERROR);
                        return EFAULT;
                }
        }

        return 0;
}
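
/*
 * Illustrative sketch only (not part of this file): the doc comments above
 * and below state that adf_dev_init() and adf_dev_start() are intended for
 * QAT device specific drivers; a plausible probe-time bring-up, unwinding a
 * partial bring-up on failure, would look like this. The function name is
 * hypothetical.
 */
#if 0
static int
example_bring_up(struct adf_accel_dev *accel_dev)
{
        int ret;

        ret = adf_dev_init(accel_dev);
        if (ret) {
                adf_dev_shutdown(accel_dev);
                return ret;
        }

        ret = adf_dev_start(accel_dev);
        if (ret) {
                adf_dev_stop(accel_dev);
                adf_dev_shutdown(accel_dev);
                return ret;
        }

        return 0;
}
#endif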

/**
 * adf_dev_start() - Start acceleration service for the given accel device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function notifies all the registered services that the acceleration device
 * is ready to be used.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_dev_start(struct adf_accel_dev *accel_dev)
{
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct service_hndl *service;
        struct list_head *list_itr;

        set_bit(ADF_STATUS_STARTING, &accel_dev->status);
        if (adf_devmgr_verify_id(&accel_dev->accel_id)) {
                device_printf(GET_DEV(accel_dev),
                              "QAT: Device %d not found\n",
                              accel_dev->accel_id);
                return ENODEV;
        }
        if (adf_ae_start(accel_dev)) {
                device_printf(GET_DEV(accel_dev), "AE Start Failed\n");
                return EFAULT;
        }

        set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
        if (hw_data->send_admin_init(accel_dev)) {
                device_printf(GET_DEV(accel_dev),
                              "Failed to send init message\n");
                return EFAULT;
        }

        if (adf_cfg_add_fw_version(accel_dev)) {
                device_printf(GET_DEV(accel_dev),
                              "Failed to update configuration FW version\n");
                return EFAULT;
        }

        if (hw_data->measure_clock)
                hw_data->measure_clock(accel_dev);

        /*
         * Set ssm watch dog timer for slice hang detection.
         * Note! Not supported on devices older than C62x.
         */
        if (hw_data->set_ssm_wdtimer && hw_data->set_ssm_wdtimer(accel_dev)) {
                device_printf(GET_DEV(accel_dev),
                              "QAT: Failed to set ssm watch dog timer\n");
                return EFAULT;
        }

        if (hw_data->int_timer_init && hw_data->int_timer_init(accel_dev)) {
                device_printf(GET_DEV(accel_dev),
                              "Failed to init heartbeat interrupt timer\n");
                return -EFAULT;
        }

        list_for_each(list_itr, &service_table)
        {
                service = list_entry(list_itr, struct service_hndl, list);
                if (service->event_hld(accel_dev, ADF_EVENT_START)) {
                        device_printf(GET_DEV(accel_dev),
                                      "Failed to start service %s\n",
                                      service->name);
                        return EFAULT;
                }
                set_bit(accel_dev->accel_id, service->start_status);
        }

        if (accel_dev->is_vf || !accel_dev->u1.pf.vf_info) {
                /* Register UIO devices */
                if (adf_uio_register(accel_dev)) {
                        adf_uio_remove(accel_dev);
                        device_printf(GET_DEV(accel_dev),
                                      "Failed to register UIO devices\n");
                        set_bit(ADF_STATUS_STARTING, &accel_dev->status);
                        clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
                        return ENODEV;
                }
        }

        if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status) &&
            adf_cfg_add_ext_params(accel_dev))
                return EFAULT;

        clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
        set_bit(ADF_STATUS_STARTED, &accel_dev->status);

        return 0;
}

/**
 * adf_dev_stop() - Stop acceleration service for the given accel device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function notifies all the registered services that the acceleration device
 * is shutting down.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_dev_stop(struct adf_accel_dev *accel_dev)
{
        struct service_hndl *service;
        struct list_head *list_itr;

        if (adf_devmgr_verify_id(&accel_dev->accel_id)) {
                device_printf(GET_DEV(accel_dev),
                              "QAT: Device %d not found\n",
                              accel_dev->accel_id);
                return ENODEV;
        }
        if (!adf_dev_started(accel_dev) &&
            !test_bit(ADF_STATUS_STARTING, &accel_dev->status)) {
                return 0;
        }

        if (adf_dev_stop_notify_sync(accel_dev)) {
                device_printf(
                    GET_DEV(accel_dev),
                    "Waiting for device un-busy failed. Retries limit reached\n");
                return EBUSY;
        }

        clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
        clear_bit(ADF_STATUS_STARTED, &accel_dev->status);

        if (accel_dev->hw_device->int_timer_exit)
                accel_dev->hw_device->int_timer_exit(accel_dev);

        list_for_each(list_itr, &service_table)
        {
                service = list_entry(list_itr, struct service_hndl, list);
                if (!test_bit(accel_dev->accel_id, service->start_status))
                        continue;
                clear_bit(accel_dev->accel_id, service->start_status);
        }

        if (accel_dev->is_vf || !accel_dev->u1.pf.vf_info) {
                /* Remove UIO devices */
                adf_uio_remove(accel_dev);
        }

        if (test_bit(ADF_STATUS_AE_STARTED, &accel_dev->status)) {
                if (adf_ae_stop(accel_dev))
                        device_printf(GET_DEV(accel_dev),
                                      "failed to stop AE\n");
                else
                        clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
        }

        return 0;
}

/**
 * adf_dev_shutdown() - shutdown acceleration services and data structures
 * @accel_dev: Pointer to acceleration device
 *
 * Cleanup the ring data structures and the admin comms and arbitration
 * services.
 */
void
adf_dev_shutdown(struct adf_accel_dev *accel_dev)
{
        struct adf_hw_device_data *hw_data = accel_dev->hw_device;
        struct service_hndl *service;
        struct list_head *list_itr;

        if (test_bit(ADF_STATUS_SYSCTL_CTX_INITIALISED, &accel_dev->status)) {
                sysctl_ctx_free(&accel_dev->sysctl_ctx);
                clear_bit(ADF_STATUS_SYSCTL_CTX_INITIALISED,
                          &accel_dev->status);
        }

        if (!hw_data) {
                device_printf(
                    GET_DEV(accel_dev),
                    "QAT: Failed to shutdown device - hw_data not set\n");
                return;
        }

        if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) {
                adf_ae_fw_release(accel_dev);
                clear_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
        }

        if (test_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status)) {
                if (adf_ae_shutdown(accel_dev))
                        device_printf(GET_DEV(accel_dev),
                                      "Failed to shutdown Accel Engine\n");
                else
                        clear_bit(ADF_STATUS_AE_INITIALISED,
                                  &accel_dev->status);
        }

        list_for_each(list_itr, &service_table)
        {
                service = list_entry(list_itr, struct service_hndl, list);
                if (!test_bit(accel_dev->accel_id, service->init_status))
                        continue;
                if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
                        device_printf(GET_DEV(accel_dev),
                                      "Failed to shutdown service %s\n",
                                      service->name);
                else
                        clear_bit(accel_dev->accel_id, service->init_status);
        }

        hw_data->disable_iov(accel_dev);

        if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
                hw_data->free_irq(accel_dev);
                clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
        }

        /* Delete configuration only if not restarting */
        if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
                adf_cfg_del_all(accel_dev);

        if (hw_data->remove_pke_stats)
                hw_data->remove_pke_stats(accel_dev);

        if (hw_data->remove_misc_error)
                hw_data->remove_misc_error(accel_dev);

        if (hw_data->exit_ras)
                hw_data->exit_ras(accel_dev);

        if (hw_data->exit_arb)
                hw_data->exit_arb(accel_dev);

        if (hw_data->exit_admin_comms)
                hw_data->exit_admin_comms(accel_dev);

        if (hw_data->exit_accel_units)
                hw_data->exit_accel_units(accel_dev);

        adf_cleanup_etr_data(accel_dev);
        if (hw_data->restore_device)
                hw_data->restore_device(accel_dev);
}

/**
 * adf_dev_reset() - Reset acceleration service for the given accel device
 * @accel_dev: Pointer to acceleration device.
 * @mode: Specifies reset mode - synchronous or asynchronous.
 *
 * Function notifies all the registered services that the acceleration device
 * is resetting.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int
adf_dev_reset(struct adf_accel_dev *accel_dev, enum adf_dev_reset_mode mode)
{
        return adf_dev_aer_schedule_reset(accel_dev, mode);
}

int
adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
{
        struct service_hndl *service;
        struct list_head *list_itr;

        list_for_each(list_itr, &service_table)
        {
                service = list_entry(list_itr, struct service_hndl, list);
                if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
                        device_printf(GET_DEV(accel_dev),
                                      "Failed to restart service %s.\n",
                                      service->name);
        }
        return 0;
}

int
adf_dev_restarting_notify_sync(struct adf_accel_dev *accel_dev)
{
        int times;

        adf_dev_restarting_notify(accel_dev);
        for (times = 0; times < ADF_STOP_RETRY; times++) {
                if (!adf_dev_in_use(accel_dev))
                        break;
                dev_dbg(GET_DEV(accel_dev), "retry times=%d\n", times);
                pause_ms("adfstop", 100);
        }
        if (adf_dev_in_use(accel_dev)) {
                clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
                device_printf(GET_DEV(accel_dev),
                              "Device still in use during reset sequence.\n");
                return EBUSY;
        }

        return 0;
}

int
adf_dev_stop_notify_sync(struct adf_accel_dev *accel_dev)
{
        int times;
        struct service_hndl *service;
        struct list_head *list_itr;

        list_for_each(list_itr, &service_table)
        {
                service = list_entry(list_itr, struct service_hndl, list);
                if (service->event_hld(accel_dev, ADF_EVENT_STOP))
                        device_printf(GET_DEV(accel_dev),
                                      "Failed to stop service %s.\n",
                                      service->name);
        }

        for (times = 0; times < ADF_STOP_RETRY; times++) {
                if (!adf_dev_in_use(accel_dev))
                        break;
                dev_dbg(GET_DEV(accel_dev), "retry times=%d\n", times);
                pause_ms("adfstop", 100);
        }
        if (adf_dev_in_use(accel_dev)) {
                clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
                device_printf(GET_DEV(accel_dev),
                              "Device still in use during stop sequence.\n");
                return EBUSY;
        }

        return 0;
}

int
adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
{
        struct service_hndl *service;
        struct list_head *list_itr;

        list_for_each(list_itr, &service_table)
        {
                service = list_entry(list_itr, struct service_hndl, list);
                if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
                        device_printf(GET_DEV(accel_dev),
                                      "Failed to restart service %s.\n",
                                      service->name);
        }
        return 0;
}
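
/*
 * Illustrative sketch only (not part of this file): a plausible restart
 * sequence built from the notifiers above, along the lines of what a reset
 * handler (e.g. the path behind adf_dev_aer_schedule_reset()) needs to do.
 * The exact ordering used by the real reset worker lives elsewhere; the
 * function name below is hypothetical.
 */
#if 0
static int
example_restart(struct adf_accel_dev *accel_dev)
{
        int ret;

        /* Tell services the device is going away and wait for it to idle. */
        ret = adf_dev_restarting_notify_sync(accel_dev);
        if (ret)
                return ret;

        adf_dev_stop(accel_dev);
        adf_dev_shutdown(accel_dev);

        ret = adf_dev_init(accel_dev);
        if (!ret)
                ret = adf_dev_start(accel_dev);
        if (ret)
                return ret;

        /* Tell services the device is back. */
        return adf_dev_restarted_notify(accel_dev);
}
#endif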