1 // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) 2 /* Copyright(c) 2014 - 2020 Intel Corporation */ 3 #include <linux/mutex.h> 4 #include <linux/list.h> 5 #include <linux/bitops.h> 6 #include <linux/delay.h> 7 #include "adf_accel_devices.h" 8 #include "adf_cfg.h" 9 #include "adf_common_drv.h" 10 #include "adf_dbgfs.h" 11 #include "adf_heartbeat.h" 12 13 static LIST_HEAD(service_table); 14 static DEFINE_MUTEX(service_lock); 15 16 static void adf_service_add(struct service_hndl *service) 17 { 18 mutex_lock(&service_lock); 19 list_add(&service->list, &service_table); 20 mutex_unlock(&service_lock); 21 } 22 23 int adf_service_register(struct service_hndl *service) 24 { 25 memset(service->init_status, 0, sizeof(service->init_status)); 26 memset(service->start_status, 0, sizeof(service->start_status)); 27 adf_service_add(service); 28 return 0; 29 } 30 31 static void adf_service_remove(struct service_hndl *service) 32 { 33 mutex_lock(&service_lock); 34 list_del(&service->list); 35 mutex_unlock(&service_lock); 36 } 37 38 int adf_service_unregister(struct service_hndl *service) 39 { 40 int i; 41 42 for (i = 0; i < ARRAY_SIZE(service->init_status); i++) { 43 if (service->init_status[i] || service->start_status[i]) { 44 pr_err("QAT: Could not remove active service\n"); 45 return -EFAULT; 46 } 47 } 48 adf_service_remove(service); 49 return 0; 50 } 51 52 /** 53 * adf_dev_init() - Init data structures and services for the given accel device 54 * @accel_dev: Pointer to acceleration device. 55 * 56 * Initialize the ring data structures and the admin comms and arbitration 57 * services. 58 * 59 * Return: 0 on success, error code otherwise. 
 */
static int adf_dev_init(struct adf_accel_dev *accel_dev)
{
	struct service_hndl *service;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int ret;

	if (!hw_data) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to init device - hw_data not set\n");
		return -EFAULT;
	}

	/* A PF must already be configured; a VF self-configures further down */
	if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
	    !accel_dev->is_vf) {
		dev_err(&GET_DEV(accel_dev), "Device not configured\n");
		return -EFAULT;
	}

	if (adf_init_etr_data(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed initialize etr\n");
		return -EFAULT;
	}

	/* Generation-specific hooks are optional; call only when implemented */
	if (hw_data->init_device && hw_data->init_device(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to initialize device\n");
		return -EFAULT;
	}

	if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed initialize admin comms\n");
		return -EFAULT;
	}

	if (hw_data->init_arb && hw_data->init_arb(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed initialize hw arbiter\n");
		return -EFAULT;
	}

	if (adf_ae_init(accel_dev)) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to initialise Acceleration Engine\n");
		return -EFAULT;
	}
	set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status);

	if (adf_ae_fw_load(accel_dev)) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to load acceleration FW\n");
		return -EFAULT;
	}
	set_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);

	if (hw_data->alloc_irq(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to allocate interrupts\n");
		return -EFAULT;
	}
	set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);

	hw_data->enable_ints(accel_dev);
	hw_data->enable_error_correction(accel_dev);

	ret = hw_data->pfvf_ops.enable_comms(accel_dev);
	if (ret)
		return ret;

	/* A VF pulls its configuration from the PF once PFVF comms are up */
	if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status) &&
	    accel_dev->is_vf) {
		if (qat_crypto_vf_dev_config(accel_dev))
			return -EFAULT;
	}

	/* Best effort: heartbeat init result is intentionally not checked */
	adf_heartbeat_init(accel_dev);

	/*
	 * Subservice initialisation is divided into two stages: init and start.
	 * This is to facilitate any ordering dependencies between services
	 * prior to starting any of the accelerators.
	 */
	list_for_each_entry(service, &service_table, list) {
		if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
			dev_err(&GET_DEV(accel_dev),
				"Failed to initialise service %s\n",
				service->name);
			return -EFAULT;
		}
		/* Record per-device progress so shutdown knows what to undo */
		set_bit(accel_dev->accel_id, service->init_status);
	}
	/*
	 * NOTE(review): service_table is walked without service_lock here —
	 * presumably registration is quiesced during device state changes;
	 * confirm against the callers of adf_service_register().
	 */

	/*
	 * On failure the status bits set above are left in place; the caller
	 * is presumably expected to run the shutdown path to unwind — confirm.
	 */
	return 0;
}

/**
 * adf_dev_start() - Start acceleration service for the given accel device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function notifies all the registered services that the acceleration device
 * is ready to be used.
 * To be used by QAT device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
static int adf_dev_start(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct service_hndl *service;
	int ret;

	set_bit(ADF_STATUS_STARTING, &accel_dev->status);

	if (adf_ae_start(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "AE Start Failed\n");
		return -EFAULT;
	}
	set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);

	if (hw_data->send_admin_init(accel_dev)) {
		dev_err(&GET_DEV(accel_dev), "Failed to send init message\n");
		return -EFAULT;
	}

	if (hw_data->measure_clock) {
		ret = hw_data->measure_clock(accel_dev);
		if (ret) {
			dev_err(&GET_DEV(accel_dev), "Failed measure device clock\n");
			return ret;
		}
	}

	/* Set ssm watch dog timer */
	if (hw_data->set_ssm_wdtimer)
		hw_data->set_ssm_wdtimer(accel_dev);

	/* Enable Power Management */
	if (hw_data->enable_pm && hw_data->enable_pm(accel_dev)) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to configure Power Management\n");
		return -EFAULT;
	}

	if (hw_data->start_timer) {
		ret = hw_data->start_timer(accel_dev);
		if (ret) {
			dev_err(&GET_DEV(accel_dev), "Failed to start internal sync timer\n");
			return ret;
		}
	}

	/* Best effort: heartbeat start result is intentionally not checked */
	adf_heartbeat_start(accel_dev);

	list_for_each_entry(service, &service_table, list) {
		if (service->event_hld(accel_dev, ADF_EVENT_START)) {
			dev_err(&GET_DEV(accel_dev),
				"Failed to start service %s\n",
				service->name);
			return -EFAULT;
		}
		set_bit(accel_dev->accel_id, service->start_status);
	}

	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
	set_bit(ADF_STATUS_STARTED, &accel_dev->status);

	/* Register crypto algorithms only on devices with crypto instances */
	if (!list_empty(&accel_dev->crypto_list) &&
	    (qat_algs_register() || qat_asym_algs_register())) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to register crypto algs\n");
		/* Put state back to STARTING so the stop path still runs */
		set_bit(ADF_STATUS_STARTING, &accel_dev->status);
		clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
		return -EFAULT;
	}
	set_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status);

	if (!list_empty(&accel_dev->compression_list) && qat_comp_algs_register()) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to register compression algs\n");
		set_bit(ADF_STATUS_STARTING, &accel_dev->status);
		clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
		return -EFAULT;
	}
	set_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status);

	adf_dbgfs_add(accel_dev);

	return 0;
}

/**
 * adf_dev_stop() - Stop acceleration service for the given accel device
 * @accel_dev: Pointer to acceleration device.
 *
 * Function notifies all the registered services that the acceleration device
 * is shutting down.
 * To be used by QAT device specific drivers.
 *
 * Return: void
 */
static void adf_dev_stop(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct service_hndl *service;
	bool wait = false;
	int ret;

	/* Nothing to do unless the device is started or mid-start */
	if (!adf_dev_started(accel_dev) &&
	    !test_bit(ADF_STATUS_STARTING, &accel_dev->status))
		return;

	adf_dbgfs_rm(accel_dev);

	clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
	clear_bit(ADF_STATUS_STARTED, &accel_dev->status);

	/* Unregister algorithms only if this device registered them at start */
	if (!list_empty(&accel_dev->crypto_list) &&
	    test_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status)) {
		qat_algs_unregister();
		qat_asym_algs_unregister();
	}
	clear_bit(ADF_STATUS_CRYPTO_ALGS_REGISTERED, &accel_dev->status);

	if (!list_empty(&accel_dev->compression_list) &&
	    test_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status))
		qat_comp_algs_unregister();
	clear_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status);

	list_for_each_entry(service, &service_table, list) {
		/* Skip services never started on this device */
		if (!test_bit(accel_dev->accel_id, service->start_status))
			continue;
		ret = service->event_hld(accel_dev, ADF_EVENT_STOP);
		if (!ret) {
			clear_bit(accel_dev->accel_id, service->start_status);
		} else if (ret == -EAGAIN) {
			/* Service asked for more time; delay the AE stop below */
			wait = true;
			clear_bit(accel_dev->accel_id, service->start_status);
		}
	}

	if (hw_data->stop_timer)
		hw_data->stop_timer(accel_dev);

	/* Grace period for services that answered -EAGAIN above */
	if (wait)
		msleep(100);

	if (test_bit(ADF_STATUS_AE_STARTED, &accel_dev->status)) {
		if (adf_ae_stop(accel_dev))
			dev_err(&GET_DEV(accel_dev), "failed to stop AE\n");
		else
			clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
	}
}

/**
 * adf_dev_shutdown() - shutdown acceleration services and data structures
 * @accel_dev: Pointer to acceleration device
 *
 * Cleanup the ring data structures and the admin comms and arbitration
 * services.
 */
static void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct service_hndl *service;

	if (!hw_data) {
		dev_err(&GET_DEV(accel_dev),
			"QAT: Failed to shutdown device - hw_data not set\n");
		return;
	}

	/* Teardown mirrors adf_dev_init() in reverse, gated by status bits */
	if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) {
		adf_ae_fw_release(accel_dev);
		clear_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
	}

	if (test_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status)) {
		if (adf_ae_shutdown(accel_dev))
			dev_err(&GET_DEV(accel_dev),
				"Failed to shutdown Accel Engine\n");
		else
			clear_bit(ADF_STATUS_AE_INITIALISED,
				  &accel_dev->status);
	}

	list_for_each_entry(service, &service_table, list) {
		/* Only notify services that were initialised on this device */
		if (!test_bit(accel_dev->accel_id, service->init_status))
			continue;
		if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
			dev_err(&GET_DEV(accel_dev),
				"Failed to shutdown service %s\n",
				service->name);
		else
			clear_bit(accel_dev->accel_id, service->init_status);
	}

	adf_heartbeat_shutdown(accel_dev);

	hw_data->disable_iov(accel_dev);

	if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
		hw_data->free_irq(accel_dev);
		clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
	}

	/* Delete configuration only if not restarting */
	if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
		adf_cfg_del_all(accel_dev);

	if (hw_data->exit_arb)
		hw_data->exit_arb(accel_dev);

	if (hw_data->exit_admin_comms)
		hw_data->exit_admin_comms(accel_dev);

	adf_cleanup_etr_data(accel_dev);
	adf_dev_restore(accel_dev);
}

/*
 * Broadcast ADF_EVENT_RESTARTING to every registered service. Failures are
 * logged but do not abort the notification of the remaining services.
 */
int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
{
	struct service_hndl *service;

	list_for_each_entry(service, &service_table, list) {
		if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
			dev_err(&GET_DEV(accel_dev),
				"Failed to restart service %s.\n",
				service->name);
	}
	return 0;
}

/*
 * Broadcast ADF_EVENT_RESTARTED to every registered service. Failures are
 * logged but do not abort the notification of the remaining services.
 */
int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
{
	struct service_hndl *service;

	list_for_each_entry(service, &service_table, list) {
		if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
			dev_err(&GET_DEV(accel_dev),
				"Failed to restart service %s.\n",
				service->name);
	}
	return 0;
}

/*
 * Stop and shut the device down while preserving the ADF_SERVICES_ENABLED
 * configuration value: the value is read before teardown (which may delete
 * the whole configuration via adf_cfg_del_all()) and written back afterwards.
 */
static int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev)
{
	char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
	int ret;

	ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,
				      ADF_SERVICES_ENABLED, services);

	adf_dev_stop(accel_dev);
	adf_dev_shutdown(accel_dev);

	/* Restore the cached value only if it existed before teardown */
	if (!ret) {
		ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC);
		if (ret)
			return ret;

		ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC,
						  ADF_SERVICES_ENABLED,
						  services, ADF_STR);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * adf_dev_down() - Bring the device down
 * @accel_dev: Pointer to acceleration device.
 * @reconfig: Preserve the ADF_SERVICES_ENABLED configuration value across
 *	the shutdown when true.
 *
 * Stops and shuts down the device under accel_dev's state_lock.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_dev_down(struct adf_accel_dev *accel_dev, bool reconfig)
{
	int ret = 0;

	if (!accel_dev)
		return -EINVAL;

	mutex_lock(&accel_dev->state_lock);

	if (reconfig) {
		ret = adf_dev_shutdown_cache_cfg(accel_dev);
		goto out;
	}

	adf_dev_stop(accel_dev);
	adf_dev_shutdown(accel_dev);

out:
	mutex_unlock(&accel_dev->state_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(adf_dev_down);

/**
 * adf_dev_up() - Bring the device up
 * @accel_dev: Pointer to acceleration device.
 * @config: Run the device-specific configuration hook before init when true.
 *
 * Configures (optionally), initialises and starts the device under
 * accel_dev's state_lock.
 *
 * Return: 0 on success, -EALREADY if already started, error code otherwise.
 */
int adf_dev_up(struct adf_accel_dev *accel_dev, bool config)
{
	int ret = 0;

	if (!accel_dev)
		return -EINVAL;

	mutex_lock(&accel_dev->state_lock);

	if (adf_dev_started(accel_dev)) {
		dev_info(&GET_DEV(accel_dev), "Device qat_dev%d already up\n",
			 accel_dev->accel_id);
		ret = -EALREADY;
		goto out;
	}

	if (config && GET_HW_DATA(accel_dev)->dev_config) {
		ret = GET_HW_DATA(accel_dev)->dev_config(accel_dev);
		if (unlikely(ret))
			goto out;
	}

	ret = adf_dev_init(accel_dev);
	if (unlikely(ret))
		goto out;

	ret = adf_dev_start(accel_dev);

out:
	mutex_unlock(&accel_dev->state_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(adf_dev_up);

/**
 * adf_dev_restart() - Restart the device
 * @accel_dev: Pointer to acceleration device.
 *
 * Brings the device down (without reconfiguration) and back up (without
 * re-running the configuration hook).
 *
 * Return: 0 on success (including when the device was already up),
 * error code otherwise.
 */
int adf_dev_restart(struct adf_accel_dev *accel_dev)
{
	int ret = 0;

	if (!accel_dev)
		return -EFAULT;

	adf_dev_down(accel_dev, false);

	ret = adf_dev_up(accel_dev, false);
	/* if device is already up return success*/
	if (ret == -EALREADY)
		return 0;

	return ret;
}
EXPORT_SYMBOL_GPL(adf_dev_restart);