/linux/drivers/crypto/intel/qat/qat_common/

adf_vf_isr.c
    28  struct adf_accel_dev *accel_dev;  [member]
    32  void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)  [in adf_enable_pf2vf_interrupts(), argument]
    34  void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);  [in adf_enable_pf2vf_interrupts()]
    39  void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev)  [in adf_disable_pf2vf_interrupts(), argument]
    41  void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);  [in adf_disable_pf2vf_interrupts()]
    47  static int adf_enable_msi(struct adf_accel_dev *accel_dev)  [in adf_enable_msi(), argument]
    49  struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;  [in adf_enable_msi()]
    53  dev_err(&GET_DEV(accel_dev),  [in adf_enable_msi()]
    61  static void adf_disable_msi(struct adf_accel_dev *accel_dev)  [in adf_disable_msi(), argument]
    63  struct pci_dev *pdev = accel_to_pci_dev(accel_dev);  [in adf_disable_msi()]
    [all …]

adf_accel_engine.c
    10  static int adf_ae_fw_load_images(struct adf_accel_dev *accel_dev, void *fw_addr,  [in adf_ae_fw_load_images(), argument]
    13  struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;  [in adf_ae_fw_load_images()]
    14  struct adf_hw_device_data *hw_device = accel_dev->hw_device;  [in adf_ae_fw_load_images()]
    22  num_objs = hw_device->uof_get_num_objs(accel_dev);  [in adf_ae_fw_load_images()]
    25  obj_name = hw_device->uof_get_name(accel_dev, i);  [in adf_ae_fw_load_images()]
    26  ae_mask = hw_device->uof_get_ae_mask(accel_dev, i);  [in adf_ae_fw_load_images()]
    28  dev_err(&GET_DEV(accel_dev), "Invalid UOF image\n");  [in adf_ae_fw_load_images()]
    33  dev_err(&GET_DEV(accel_dev),  [in adf_ae_fw_load_images()]
    38  dev_err(&GET_DEV(accel_dev),  [in adf_ae_fw_load_images()]
    43  dev_err(&GET_DEV(accel_dev),  [in adf_ae_fw_load_images()]
    [all …]

adf_heartbeat.c
    26  static int adf_hb_check_polling_freq(struct adf_accel_dev *accel_dev)  [in adf_hb_check_polling_freq(), argument]
    29  u64 polling_time = curr_time - accel_dev->heartbeat->last_hb_check_time;  [in adf_hb_check_polling_freq()]
    31  if (polling_time < accel_dev->heartbeat->hb_timer) {  [in adf_hb_check_polling_freq()]
    32  dev_warn(&GET_DEV(accel_dev),  [in adf_hb_check_polling_freq()]
    34  accel_dev->heartbeat->hb_timer);  [in adf_hb_check_polling_freq()]
    38  accel_dev->heartbeat->last_hb_check_time = curr_time;  [in adf_hb_check_polling_freq()]
    51  static bool validate_hb_ctrs_cnt(struct adf_accel_dev *accel_dev)  [in validate_hb_ctrs_cnt(), argument]
    53  const size_t hb_ctrs = accel_dev->hw_device->num_hb_ctrs;  [in validate_hb_ctrs_cnt()]
    54  const size_t max_aes = accel_dev->hw_device->num_engines;  [in validate_hb_ctrs_cnt()]
    62  struct hb_cnt_pair *hb_stats = accel_dev->heartbeat->dma.virt_addr;  [in validate_hb_ctrs_cnt()]
    [all …]

adf_pfvf_vf_proto.c
    33  int adf_send_vf2pf_msg(struct adf_accel_dev *accel_dev, struct pfvf_message msg)  [in adf_send_vf2pf_msg(), argument]
    35  struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);  [in adf_send_vf2pf_msg()]
    38  return pfvf_ops->send_msg(accel_dev, msg, pfvf_offset,  [in adf_send_vf2pf_msg()]
    39  &accel_dev->vf.vf2pf_lock);  [in adf_send_vf2pf_msg()]
    50  static struct pfvf_message adf_recv_pf2vf_msg(struct adf_accel_dev *accel_dev)  [in adf_recv_pf2vf_msg(), argument]
    52  struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);  [in adf_recv_pf2vf_msg()]
    55  return pfvf_ops->recv_msg(accel_dev, pfvf_offset, accel_dev->vf.pf_compat_ver);  [in adf_recv_pf2vf_msg()]
    69  int adf_send_vf2pf_req(struct adf_accel_dev *accel_dev, struct pfvf_message msg,  [in adf_send_vf2pf_req(), argument]
    76  reinit_completion(&accel_dev->vf.msg_received);  [in adf_send_vf2pf_req()]
    80  ret = adf_send_vf2pf_msg(accel_dev, msg);  [in adf_send_vf2pf_req()]
    [all …]

adf_pfvf_vf_msg.c
    18  int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)  [in adf_vf2pf_notify_init(), argument]
    22  if (adf_send_vf2pf_msg(accel_dev, msg)) {  [in adf_vf2pf_notify_init()]
    23  dev_err(&GET_DEV(accel_dev),  [in adf_vf2pf_notify_init()]
    27  set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);  [in adf_vf2pf_notify_init()]
    40  void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)  [in adf_vf2pf_notify_shutdown(), argument]
    44  if (test_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status))  [in adf_vf2pf_notify_shutdown()]
    45  if (adf_send_vf2pf_msg(accel_dev, msg))  [in adf_vf2pf_notify_shutdown()]
    46  dev_err(&GET_DEV(accel_dev),  [in adf_vf2pf_notify_shutdown()]
    51  void adf_vf2pf_notify_restart_complete(struct adf_accel_dev *accel_dev)  [in adf_vf2pf_notify_restart_complete(), argument]
    56  if (accel_dev->vf.pf_compat_ver < ADF_PFVF_COMPAT_FALLBACK)  [in adf_vf2pf_notify_restart_complete()]
    [all …]
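Read together, adf_pfvf_vf_proto.c and adf_pfvf_vf_msg.c show the VF-side notification shape: build a pfvf_message, hand it to adf_send_vf2pf_msg(), and record the PF state on success. A minimal sketch of that shape follows; the header names, the ADF_VF2PF_MSGTYPE_INIT constant and the field layout of struct pfvf_message are assumptions, only the calls visible in the excerpts are taken as given.

/* Sketch only: mirrors the notify pattern in the excerpts above.
 * Header names and the message-type constant are assumptions.
 */
#include "adf_accel_devices.h"	/* struct adf_accel_dev, GET_DEV() (assumed) */
#include "adf_pfvf_msg.h"	/* struct pfvf_message (assumed) */
#include "adf_pfvf_vf_proto.h"	/* adf_send_vf2pf_msg() (assumed) */

static int example_vf_notify_init(struct adf_accel_dev *accel_dev)
{
	/* Message type value is illustrative, not the verified constant. */
	struct pfvf_message msg = { .type = ADF_VF2PF_MSGTYPE_INIT };

	if (adf_send_vf2pf_msg(accel_dev, msg)) {
		dev_err(&GET_DEV(accel_dev),
			"Failed to send Init event to PF\n");
		return -EFAULT;
	}

	/* Remember that the PF answered, as adf_vf2pf_notify_init() does. */
	set_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status);
	return 0;
}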

adf_sysfs_ras_counters.c
    15  struct adf_accel_dev *accel_dev;  [in errors_correctable_show(), local]
    18  accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));  [in errors_correctable_show()]
    19  if (!accel_dev)  [in errors_correctable_show()]
    22  counter = ADF_RAS_ERR_CTR_READ(accel_dev->ras_errors, ADF_RAS_CORR);  [in errors_correctable_show()]
    30  struct adf_accel_dev *accel_dev;  [in errors_nonfatal_show(), local]
    33  accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));  [in errors_nonfatal_show()]
    34  if (!accel_dev)  [in errors_nonfatal_show()]
    37  counter = ADF_RAS_ERR_CTR_READ(accel_dev->ras_errors, ADF_RAS_UNCORR);  [in errors_nonfatal_show()]
    45  struct adf_accel_dev *accel_dev;  [in errors_fatal_show(), local]
    48  accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));  [in errors_fatal_show()]
    [all …]
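All three show() callbacks in this file follow one shape: map the sysfs struct device back to its accelerator with adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)), bail out if no device is registered, then read one RAS counter. A hedged sketch of that shape; the -EINVAL error code and the sysfs_emit() formatting call are assumptions not shown in the excerpt.

/* Sketch of the shared show() shape; error code and sysfs_emit() are assumed. */
static ssize_t example_errors_correctable_show(struct device *dev,
					       struct device_attribute *attr,
					       char *buf)
{
	struct adf_accel_dev *accel_dev;
	unsigned long counter;

	/* Resolve the PCI device back to its QAT accel_dev, as in the excerpt. */
	accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev));
	if (!accel_dev)
		return -EINVAL;

	/* Read the correctable-error counter. */
	counter = ADF_RAS_ERR_CTR_READ(accel_dev->ras_errors, ADF_RAS_CORR);

	return sysfs_emit(buf, "%lu\n", counter);
}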

qat_mig_dev.c
    13  struct adf_accel_dev *accel_dev;  [in qat_vfmig_create(), local]
    17  accel_dev = adf_devmgr_pci_to_accel_dev(pdev);  [in qat_vfmig_create()]
    18  if (!accel_dev)  [in qat_vfmig_create()]
    21  ops = GET_VFMIG_OPS(accel_dev);  [in qat_vfmig_create()]
    32  mdev->parent_accel_dev = accel_dev;  [in qat_vfmig_create()]
    40  struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;  [in qat_vfmig_init(), local]
    42  return GET_VFMIG_OPS(accel_dev)->init(mdev);  [in qat_vfmig_init()]
    48  struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;  [in qat_vfmig_cleanup(), local]
    50  return GET_VFMIG_OPS(accel_dev)->cleanup(mdev);  [in qat_vfmig_cleanup()]
    56  struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;  [in qat_vfmig_reset(), local]
    [all …]
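qat_mig_dev.c is a thin dispatcher: each entry point recovers the parent accel_dev from the migration device and jumps through GET_VFMIG_OPS(). A sketch of one such wrapper; the struct qat_mig_dev type name is inferred from the file name and treated as an assumption.

/* Sketch of the dispatch shape; the qat_mig_dev type name is an assumption. */
static int example_vfmig_init(struct qat_mig_dev *mdev)
{
	struct adf_accel_dev *accel_dev = mdev->parent_accel_dev;

	/* Delegate to the device-specific migration ops, as in the excerpt. */
	return GET_VFMIG_OPS(accel_dev)->init(mdev);
}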

adf_telemetry.c
    24  static bool is_tl_supported(struct adf_accel_dev *accel_dev)  [in is_tl_supported(), argument]
    26  u16 fw_caps = GET_HW_DATA(accel_dev)->fw_capabilities;  [in is_tl_supported()]
    58  static int adf_tl_alloc_mem(struct adf_accel_dev *accel_dev)  [in adf_tl_alloc_mem(), argument]
    60  struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);  [in adf_tl_alloc_mem()]
    61  struct device *dev = &GET_DEV(accel_dev);  [in adf_tl_alloc_mem()]
    98  accel_dev->telemetry = telemetry;  [in adf_tl_alloc_mem()]
    119  static void adf_tl_free_mem(struct adf_accel_dev *accel_dev)  [in adf_tl_free_mem(), argument]
    121  struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev);  [in adf_tl_free_mem()]
    122  struct adf_telemetry *telemetry = accel_dev->telemetry;  [in adf_tl_free_mem()]
    123  struct device *dev = &GET_DEV(accel_dev);  [in adf_tl_free_mem()]
    [all …]

adf_pfvf_pf_msg.c
    13  void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)  [in adf_pf2vf_notify_restarting(), argument]
    17  int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));  [in adf_pf2vf_notify_restarting()]
    19  dev_dbg(&GET_DEV(accel_dev), "pf2vf notify restarting\n");  [in adf_pf2vf_notify_restarting()]
    20  for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {  [in adf_pf2vf_notify_restarting()]
    29  if (adf_send_pf2vf_msg(accel_dev, i, msg))  [in adf_pf2vf_notify_restarting()]
    30  dev_err(&GET_DEV(accel_dev),  [in adf_pf2vf_notify_restarting()]
    35  void adf_pf2vf_wait_for_restarting_complete(struct adf_accel_dev *accel_dev)  [in adf_pf2vf_wait_for_restarting_complete(), argument]
    37  int num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));  [in adf_pf2vf_wait_for_restarting_complete()]
    42  dev_dbg(&GET_DEV(accel_dev), "pf2vf wait for restarting complete\n");  [in adf_pf2vf_wait_for_restarting_complete()]
    45  for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++)  [in adf_pf2vf_wait_for_restarting_complete()]
    [all …]
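The restarting notification walks every enabled VF and posts the same message to each one through adf_send_pf2vf_msg(), whose signature appears in adf_pfvf_pf_proto.c further down. A sketch of that loop; the message-type constant is an assumption, and the per-VF init check the real function performs before sending is left out.

/* Sketch of the per-VF broadcast loop; the message type is an assumption. */
static void example_pf_notify_restarting(struct adf_accel_dev *accel_dev)
{
	struct pfvf_message msg = { .type = ADF_PF2VF_MSGTYPE_RESTARTING };
	int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));

	dev_dbg(&GET_DEV(accel_dev), "pf2vf notify restarting\n");
	for (i = 0; i < num_vfs; i++) {
		if (adf_send_pf2vf_msg(accel_dev, i, msg))
			dev_err(&GET_DEV(accel_dev),
				"Failed to send restarting msg to VF%d\n", i);
	}
}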

qat_crypto.c
    21  adf_dev_put(inst->accel_dev);  [in qat_crypto_put_instance()]
    24  static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)  [in qat_crypto_free_instances(), argument]
    29  list_for_each_entry_safe(inst, tmp, &accel_dev->crypto_list, list) {  [in qat_crypto_free_instances()]
    53  struct adf_accel_dev *accel_dev = NULL, *tmp_dev;  [in qat_crypto_get_instance_node(), local]
    66  accel_dev = tmp_dev;  [in qat_crypto_get_instance_node()]
    72  if (!accel_dev) {  [in qat_crypto_get_instance_node()]
    78  accel_dev = tmp_dev;  [in qat_crypto_get_instance_node()]
    84  if (!accel_dev)  [in qat_crypto_get_instance_node()]
    88  list_for_each_entry(tmp_inst, &accel_dev->crypto_list, list) {  [in qat_crypto_get_instance_node()]
    98  if (adf_dev_get(accel_dev)) {  [in qat_crypto_get_instance_node()]
    [all …]

adf_gen2_config.c
    13  static int adf_gen2_crypto_dev_config(struct adf_accel_dev *accel_dev)  [in adf_gen2_crypto_dev_config(), argument]
    16  int banks = GET_MAX_BANKS(accel_dev);  [in adf_gen2_crypto_dev_config()]
    23  if (adf_hw_dev_has_crypto(accel_dev))  [in adf_gen2_crypto_dev_config()]
    31  ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,  [in adf_gen2_crypto_dev_config()]
    37  ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,  [in adf_gen2_crypto_dev_config()]
    44  ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,  [in adf_gen2_crypto_dev_config()]
    51  ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,  [in adf_gen2_crypto_dev_config()]
    58  ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,  [in adf_gen2_crypto_dev_config()]
    65  ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,  [in adf_gen2_crypto_dev_config()]
    72  ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,  [in adf_gen2_crypto_dev_config()]
    [all …]
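Each step of adf_gen2_crypto_dev_config() writes one key/value pair into the kernel section of the device configuration with adf_cfg_add_key_value_param() and checks the return value. A sketch of a single such step; the key name, the value pointer and the ADF_DEC type tag are assumptions, since the excerpt truncates each call after the section argument.

/* Sketch of one config step; key name, value argument and ADF_DEC are assumed. */
static int example_add_num_banks_key(struct adf_accel_dev *accel_dev)
{
	unsigned long val = GET_MAX_BANKS(accel_dev);

	/* Key string and trailing arguments are illustrative, not verified. */
	return adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
					   "NumberOfBanks", &val, ADF_DEC);
}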

adf_ctl_drv.c
    114  static int adf_add_key_value_data(struct adf_accel_dev *accel_dev,  [in adf_add_key_value_data(), argument]
    122  if (adf_cfg_add_key_value_param(accel_dev, section,  [in adf_add_key_value_data()]
    125  dev_err(&GET_DEV(accel_dev),  [in adf_add_key_value_data()]
    130  if (adf_cfg_add_key_value_param(accel_dev, section,  [in adf_add_key_value_data()]
    133  dev_err(&GET_DEV(accel_dev),  [in adf_add_key_value_data()]
    141  static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,  [in adf_copy_key_value_data(), argument]
    154  dev_err(&GET_DEV(accel_dev),  [in adf_copy_key_value_data()]
    159  if (adf_cfg_section_add(accel_dev, section.name)) {  [in adf_copy_key_value_data()]
    160  dev_err(&GET_DEV(accel_dev),  [in adf_copy_key_value_data()]
    170  dev_err(&GET_DEV(accel_dev),  [in adf_copy_key_value_data()]
    [all …]

adf_transport.c
    61  struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);  [in adf_enable_ring_irq()]
    74  struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);  [in adf_disable_ring_irq()]
    90  struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);  [in adf_send_message()]
    114  struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);  [in adf_handle_response()]
    138  struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);  [in adf_configure_tx_ring()]
    149  struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);  [in adf_configure_rx_ring()]
    163  struct adf_accel_dev *accel_dev = bank->accel_dev;  [in adf_init_ring(), local]
    164  struct adf_hw_device_data *hw_data = accel_dev->hw_device;  [in adf_init_ring()]
    165  struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);  [in adf_init_ring()]
    171  ring->base_addr = dma_alloc_coherent(&GET_DEV(accel_dev),  [in adf_init_ring()]
    [all …]

adf_pfvf_pf_proto.c
    32  int adf_send_pf2vf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr, struct pfvf_message msg)  [in adf_send_pf2vf_msg(), argument]
    34  struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);  [in adf_send_pf2vf_msg()]
    37  return pfvf_ops->send_msg(accel_dev, msg, pfvf_offset,  [in adf_send_pf2vf_msg()]
    38  &accel_dev->pf.vf_info[vf_nr].pf2vf_lock);  [in adf_send_pf2vf_msg()]
    50  static struct pfvf_message adf_recv_vf2pf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr)  [in adf_recv_vf2pf_msg(), argument]
    52  struct adf_accel_vf_info *vf_info = &accel_dev->pf.vf_info[vf_nr];  [in adf_recv_vf2pf_msg()]
    53  struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);  [in adf_recv_vf2pf_msg()]
    56  return pfvf_ops->recv_msg(accel_dev, pfvf_offset, vf_info->vf_compat_ver);  [in adf_recv_vf2pf_msg()]
    85  struct adf_accel_dev *accel_dev = vf_info->accel_dev;  [in adf_pf2vf_blkmsg_get_data(), local]
    97  if (unlikely((*provider)(accel_dev, blkmsg, vf_info->vf_compat_ver))) {  [in adf_pf2vf_blkmsg_get_data()]
    [all …]

adf_admin.h
    10  int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
    11  void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
    12  int adf_send_admin_init(struct adf_accel_dev *accel_dev);
    13  int adf_get_ae_fw_counters(struct adf_accel_dev *accel_dev, u16 ae, u64 *reqs, u64 *resps);
    14  int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay);
    15  int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt);
    16  int adf_send_admin_hb_timer(struct adf_accel_dev *accel_dev, uint32_t ticks);
    17  int adf_send_admin_rl_init(struct adf_accel_dev *accel_dev,
    19  int adf_send_admin_rl_add_update(struct adf_accel_dev *accel_dev,
    21  int adf_send_admin_rl_delete(struct adf_accel_dev *accel_dev, u16 node_id,
    [all …]

adf_heartbeat_dbgfs.c
    49  struct adf_accel_dev *accel_dev;  [in adf_hb_status_read(), local]
    56  accel_dev = file->private_data;  [in adf_hb_status_read()]
    59  adf_heartbeat_status(accel_dev, &hb_status);  [in adf_hb_status_read()]
    79  struct adf_accel_dev *accel_dev;  [in adf_hb_cfg_read(), local]
    86  accel_dev = file->private_data;  [in adf_hb_cfg_read()]
    87  timer_ms = accel_dev->heartbeat->hb_timer;  [in adf_hb_cfg_read()]
    98  struct adf_accel_dev *accel_dev;  [in adf_hb_cfg_write(), local]
    103  accel_dev = file->private_data;  [in adf_hb_cfg_write()]
    115  dev_err(&GET_DEV(accel_dev),  [in adf_hb_cfg_write()]
    121  dev_err(&GET_DEV(accel_dev),  [in adf_hb_cfg_write()]
    [all …]

adf_heartbeat.h
    54  int adf_heartbeat_init(struct adf_accel_dev *accel_dev);
    55  int adf_heartbeat_start(struct adf_accel_dev *accel_dev);
    56  void adf_heartbeat_shutdown(struct adf_accel_dev *accel_dev);
    58  int adf_heartbeat_ms_to_ticks(struct adf_accel_dev *accel_dev, unsigned int time_ms,
    60  int adf_heartbeat_save_cfg_param(struct adf_accel_dev *accel_dev,
    62  void adf_heartbeat_status(struct adf_accel_dev *accel_dev,
    64  void adf_heartbeat_check_ctrs(struct adf_accel_dev *accel_dev);
    67  int adf_heartbeat_inject_error(struct adf_accel_dev *accel_dev);
    69  static inline int adf_heartbeat_inject_error(struct adf_accel_dev *accel_dev)  [in adf_heartbeat_inject_error(), argument]
    76  static inline int adf_heartbeat_init(struct adf_accel_dev *accel_dev)  [in adf_heartbeat_init(), argument]
    [all …]
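adf_heartbeat.h pairs each public call with a static inline stub for builds without heartbeat support, and the debugfs code above consumes the API through adf_heartbeat_status(). A sketch of a caller that queries liveness; the enum adf_device_heartbeat_status type name and its HB_DEV_ALIVE value are assumptions based only on how adf_hb_status_read() uses the second argument.

/* Sketch only: the status enum name and its values are assumed, not verified. */
static bool example_device_is_alive(struct adf_accel_dev *accel_dev)
{
	enum adf_device_heartbeat_status hb_status;

	/* The second argument receives the status, as adf_hb_status_read() shows. */
	adf_heartbeat_status(accel_dev, &hb_status);

	return hb_status == HB_DEV_ALIVE;
}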

adf_cfg.c
    67  int adf_cfg_dev_add(struct adf_accel_dev *accel_dev)  [in adf_cfg_dev_add(), argument]
    76  accel_dev->cfg = dev_cfg_data;  [in adf_cfg_dev_add()]
    81  void adf_cfg_dev_dbgfs_add(struct adf_accel_dev *accel_dev)  [in adf_cfg_dev_dbgfs_add(), argument]
    83  struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;  [in adf_cfg_dev_dbgfs_add()]
    86  accel_dev->debugfs_dir,  [in adf_cfg_dev_dbgfs_add()]
    91  void adf_cfg_dev_dbgfs_rm(struct adf_accel_dev *accel_dev)  [in adf_cfg_dev_dbgfs_rm(), argument]
    93  struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;  [in adf_cfg_dev_dbgfs_rm()]
    106  void adf_cfg_del_all(struct adf_accel_dev *accel_dev)  [in adf_cfg_del_all(), argument]
    108  struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;  [in adf_cfg_del_all()]
    113  clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);  [in adf_cfg_del_all()]
    [all …]

adf_gen4_pm.c
    18  struct adf_accel_dev *accel_dev;  [member]
    22  static int send_host_msg(struct adf_accel_dev *accel_dev)  [in send_host_msg(), argument]
    25  void __iomem *pmisc = adf_get_pmisc_base(accel_dev);  [in send_host_msg()]
    26  struct adf_pm *pm = &accel_dev->power_management;  [in send_host_msg()]
    35  adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC,  [in send_host_msg()]
    64  struct adf_accel_dev *accel_dev = pm_data->accel_dev;  [in pm_bh_handler(), local]
    65  void __iomem *pmisc = adf_get_pmisc_base(accel_dev);  [in pm_bh_handler()]
    66  struct adf_pm *pm = &accel_dev->power_management;  [in pm_bh_handler()]
    74  if (send_host_msg(accel_dev))  [in pm_bh_handler()]
    75  dev_warn_ratelimited(&GET_DEV(accel_dev),  [in pm_bh_handler()]
    [all …]

adf_pfvf_pf_msg.h
    9  void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev);
    10  void adf_pf2vf_wait_for_restarting_complete(struct adf_accel_dev *accel_dev);
    11  void adf_pf2vf_notify_restarted(struct adf_accel_dev *accel_dev);
    12  void adf_pf2vf_notify_fatal_error(struct adf_accel_dev *accel_dev);
    14  static inline void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)  [in adf_pf2vf_notify_restarting(), argument]
    18  static inline void adf_pf2vf_wait_for_restarting_complete(struct adf_accel_dev *accel_dev)  [in adf_pf2vf_wait_for_restarting_complete(), argument]
    22  static inline void adf_pf2vf_notify_restarted(struct adf_accel_dev *accel_dev)  [in adf_pf2vf_notify_restarted(), argument]
    26  static inline void adf_pf2vf_notify_fatal_error(struct adf_accel_dev *accel_dev)  [in adf_pf2vf_notify_fatal_error(), argument]
    31  typedef int (*adf_pf2vf_blkmsg_provider)(struct adf_accel_dev *accel_dev,
    34  int adf_pf_capabilities_msg_provider(struct adf_accel_dev *accel_dev,
    [all …]
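The header shows the usual SR-IOV pattern: real prototypes when PF/VF support is compiled in, and empty static inline stubs otherwise, so callers never need an #ifdef of their own. A condensed sketch of that pattern; the CONFIG_PCI_IOV guard is the assumption here, since the excerpt omits the preprocessor lines themselves.

/* Sketch of the stub pattern; the exact Kconfig guard is an assumption. */
#ifdef CONFIG_PCI_IOV
void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev);
void adf_pf2vf_notify_fatal_error(struct adf_accel_dev *accel_dev);
#else
static inline void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
{
}

static inline void adf_pf2vf_notify_fatal_error(struct adf_accel_dev *accel_dev)
{
}
#endif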

adf_heartbeat_inject.c
    11  static int adf_hb_set_timer_to_max(struct adf_accel_dev *accel_dev)  [in adf_hb_set_timer_to_max(), argument]
    13  struct adf_hw_device_data *hw_data = accel_dev->hw_device;  [in adf_hb_set_timer_to_max()]
    15  accel_dev->heartbeat->hb_timer = 0;  [in adf_hb_set_timer_to_max()]
    18  hw_data->stop_timer(accel_dev);  [in adf_hb_set_timer_to_max()]
    20  return adf_send_admin_hb_timer(accel_dev, MAX_HB_TICKS);  [in adf_hb_set_timer_to_max()]
    23  static void adf_set_hb_counters_fail(struct adf_accel_dev *accel_dev, u32 ae,  [in adf_set_hb_counters_fail(), argument]
    26  struct hb_cnt_pair *stats = accel_dev->heartbeat->dma.virt_addr;  [in adf_set_hb_counters_fail()]
    27  struct adf_hw_device_data *hw_device = accel_dev->hw_device;  [in adf_set_hb_counters_fail()]
    42  int adf_heartbeat_inject_error(struct adf_accel_dev *accel_dev)  [in adf_heartbeat_inject_error(), argument]
    44  struct adf_hw_device_data *hw_device = accel_dev->hw_device;  [in adf_heartbeat_inject_error()]
    [all …]

adf_fw_counters.c
    49  static int adf_fw_counters_load_from_device(struct adf_accel_dev *accel_dev,  [in adf_fw_counters_load_from_device(), argument]
    52  struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);  [in adf_fw_counters_load_from_device()]
    64  for_each_set_bit(ae, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) {  [in adf_fw_counters_load_from_device()]
    68  ret = adf_get_ae_fw_counters(accel_dev, ae, &req_count, &resp_count);  [in adf_fw_counters_load_from_device()]
    107  static struct adf_fw_counters *adf_fw_counters_get(struct adf_accel_dev *accel_dev)  [in adf_fw_counters_get(), argument]
    109  struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);  [in adf_fw_counters_get()]
    114  if (!adf_dev_started(accel_dev)) {  [in adf_fw_counters_get()]
    115  dev_err(&GET_DEV(accel_dev), "QAT Device not started\n");  [in adf_fw_counters_get()]
    126  ret = adf_fw_counters_load_from_device(accel_dev, fw_counters);  [in adf_fw_counters_get()]
    129  dev_err(&GET_DEV(accel_dev),  [in adf_fw_counters_get()]
    [all …]
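adf_fw_counters_load_from_device() walks the enabled acceleration engines and asks the admin interface for each engine's request/response counters via adf_get_ae_fw_counters(), whose full prototype appears in adf_admin.h above. A sketch of that loop; the source of ae_mask (hw_data->ae_mask) and the totalling via output pointers are assumptions.

/* Sketch of the per-engine counter read; the ae_mask source is an assumption. */
static int example_read_fw_counters(struct adf_accel_dev *accel_dev,
				    u64 *total_reqs, u64 *total_resps)
{
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	unsigned long ae_mask = hw_data->ae_mask;	/* assumed field */
	unsigned long ae;
	int ret;

	*total_reqs = 0;
	*total_resps = 0;

	/* Walk only the engines enabled in the mask, as the excerpt does. */
	for_each_set_bit(ae, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) {
		u64 req_count, resp_count;

		ret = adf_get_ae_fw_counters(accel_dev, ae, &req_count,
					     &resp_count);
		if (ret)
			return ret;

		*total_reqs += req_count;
		*total_resps += resp_count;
	}

	return 0;
}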
/linux/drivers/crypto/intel/qat/qat_dh895xccvf/ |

adf_drv.c
    38  static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)  [in adf_cleanup_pci_dev(), argument]
    40  pci_release_regions(accel_dev->accel_pci_dev.pci_dev);  [in adf_cleanup_pci_dev()]
    41  pci_disable_device(accel_dev->accel_pci_dev.pci_dev);  [in adf_cleanup_pci_dev()]
    44  static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)  [in adf_cleanup_accel(), argument]
    46  struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;  [in adf_cleanup_accel()]
    57  if (accel_dev->hw_device) {  [in adf_cleanup_accel()]
    60  adf_clean_hw_data_dh895xcciov(accel_dev->hw_device);  [in adf_cleanup_accel()]
    65  kfree(accel_dev->hw_device);  [in adf_cleanup_accel()]
    66  accel_dev->hw_device = NULL;  [in adf_cleanup_accel()]
    68  adf_dbgfs_exit(accel_dev);  [in adf_cleanup_accel()]
    [all …]
/linux/drivers/crypto/intel/qat/qat_c62xvf/ |

adf_drv.c
    38  static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)  [in adf_cleanup_pci_dev(), argument]
    40  pci_release_regions(accel_dev->accel_pci_dev.pci_dev);  [in adf_cleanup_pci_dev()]
    41  pci_disable_device(accel_dev->accel_pci_dev.pci_dev);  [in adf_cleanup_pci_dev()]
    44  static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)  [in adf_cleanup_accel(), argument]
    46  struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;  [in adf_cleanup_accel()]
    57  if (accel_dev->hw_device) {  [in adf_cleanup_accel()]
    60  adf_clean_hw_data_c62xiov(accel_dev->hw_device);  [in adf_cleanup_accel()]
    65  kfree(accel_dev->hw_device);  [in adf_cleanup_accel()]
    66  accel_dev->hw_device = NULL;  [in adf_cleanup_accel()]
    68  adf_dbgfs_exit(accel_dev);  [in adf_cleanup_accel()]
    [all …]
/linux/drivers/crypto/intel/qat/qat_c3xxxvf/ |

adf_drv.c
    38  static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)  [in adf_cleanup_pci_dev(), argument]
    40  pci_release_regions(accel_dev->accel_pci_dev.pci_dev);  [in adf_cleanup_pci_dev()]
    41  pci_disable_device(accel_dev->accel_pci_dev.pci_dev);  [in adf_cleanup_pci_dev()]
    44  static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)  [in adf_cleanup_accel(), argument]
    46  struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;  [in adf_cleanup_accel()]
    57  if (accel_dev->hw_device) {  [in adf_cleanup_accel()]
    60  adf_clean_hw_data_c3xxxiov(accel_dev->hw_device);  [in adf_cleanup_accel()]
    65  kfree(accel_dev->hw_device);  [in adf_cleanup_accel()]
    66  accel_dev->hw_device = NULL;  [in adf_cleanup_accel()]
    68  adf_dbgfs_exit(accel_dev);  [in adf_cleanup_accel()]
    [all …]