xref: /linux/drivers/crypto/intel/qat/qat_common/adf_rl_admin.c (revision 0ea5c948cb64bab5bc7a5516774eb8536f05aa0d)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Intel Corporation */

#include <linux/dma-mapping.h>
#include <linux/pci.h>

#include "adf_admin.h"
#include "adf_accel_devices.h"
#include "adf_rl_admin.h"

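/*
 * Populate the admin message that asks the firmware to add or update a
 * rate limiting SLA. The SLA configuration parameters are referenced by
 * DMA address and size; node id, node type, ring pair count and service
 * type are carried in the message itself.
 */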
static void
prep_admin_req_msg(struct rl_sla *sla, dma_addr_t dma_addr,
		   struct icp_qat_fw_init_admin_sla_config_params *fw_params,
		   struct icp_qat_fw_init_admin_req *req, bool is_update)
{
	req->cmd_id = is_update ? ICP_QAT_FW_RL_UPDATE : ICP_QAT_FW_RL_ADD;
	req->init_cfg_ptr = dma_addr;
	req->init_cfg_sz = sizeof(*fw_params);
	req->node_id = sla->node_id;
	req->node_type = sla->type;
	req->rp_count = sla->ring_pairs_cnt;
	req->svc_type = sla->srv;
}

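/*
 * Convert the SLA committed (CIR) and peak (PIR) rates into the units
 * consumed by the firmware: PCIe bandwidth in both directions, slice
 * utilization tokens and AE cycles. The ring pair ids covered by the
 * SLA are copied in as well.
 */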
static void
prep_admin_req_params(struct adf_accel_dev *accel_dev, struct rl_sla *sla,
		      struct icp_qat_fw_init_admin_sla_config_params *fw_params)
{
	fw_params->pcie_in_cir =
		adf_rl_calculate_pci_bw(accel_dev, sla->cir, sla->srv, false);
	fw_params->pcie_in_pir =
		adf_rl_calculate_pci_bw(accel_dev, sla->pir, sla->srv, false);
	fw_params->pcie_out_cir =
		adf_rl_calculate_pci_bw(accel_dev, sla->cir, sla->srv, true);
	fw_params->pcie_out_pir =
		adf_rl_calculate_pci_bw(accel_dev, sla->pir, sla->srv, true);

	fw_params->slice_util_cir =
		adf_rl_calculate_slice_tokens(accel_dev, sla->cir, sla->srv);
	fw_params->slice_util_pir =
		adf_rl_calculate_slice_tokens(accel_dev, sla->pir, sla->srv);

	fw_params->ae_util_cir =
		adf_rl_calculate_ae_cycles(accel_dev, sla->cir, sla->srv);
	fw_params->ae_util_pir =
		adf_rl_calculate_ae_cycles(accel_dev, sla->pir, sla->srv);

	memcpy(fw_params->rp_ids, sla->ring_pairs_ids,
	       sizeof(sla->ring_pairs_ids));
}

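/*
 * Retrieve from the firmware the slice counts used by the rate limiting
 * algorithm and store them in @slices_int.
 */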
int adf_rl_send_admin_init_msg(struct adf_accel_dev *accel_dev,
			       struct rl_slice_cnt *slices_int)
{
	struct icp_qat_fw_init_admin_slice_cnt slices_resp = { };
	int ret;

	ret = adf_send_admin_rl_init(accel_dev, &slices_resp);
	if (ret)
		return ret;

	slices_int->dcpr_cnt = slices_resp.dcpr_cnt;
	slices_int->pke_cnt = slices_resp.pke_cnt;
	/* For symmetric crypto, slice tokens are relative to the UCS slice */
	slices_int->cph_cnt = slices_resp.ucs_cnt;

	return 0;
}

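/*
 * Send an RL_ADD or RL_UPDATE admin message for @sla. The configuration
 * parameters are placed in a DMA coherent buffer so that the firmware
 * can read them directly.
 */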
int adf_rl_send_admin_add_update_msg(struct adf_accel_dev *accel_dev,
				     struct rl_sla *sla, bool is_update)
{
	struct icp_qat_fw_init_admin_sla_config_params *fw_params;
	struct icp_qat_fw_init_admin_req req = { };
	dma_addr_t dma_addr;
	int ret;

	fw_params = dma_alloc_coherent(&GET_DEV(accel_dev), sizeof(*fw_params),
				       &dma_addr, GFP_KERNEL);
	if (!fw_params)
		return -ENOMEM;

	prep_admin_req_params(accel_dev, sla, fw_params);
	prep_admin_req_msg(sla, dma_addr, fw_params, &req, is_update);
	ret = adf_send_admin_rl_add_update(accel_dev, &req);

	dma_free_coherent(&GET_DEV(accel_dev), sizeof(*fw_params), fw_params,
			  dma_addr);

	return ret;
}

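/*
 * Ask the firmware to delete the rate limiting node identified by
 * @node_id and @node_type.
 */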
int adf_rl_send_admin_delete_msg(struct adf_accel_dev *accel_dev, u16 node_id,
				 u8 node_type)
{
	return adf_send_admin_rl_delete(accel_dev, node_id, node_type);
}