/* xref: /linux/drivers/crypto/intel/qat/qat_common/adf_gen2_hw_data.c
 * (revision 621cde16e49b3ecf7d59a8106a20aaebfb4a59a9)
 */
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 Intel Corporation */
#include "adf_common_drv.h"
#include "adf_gen2_hw_data.h"
#include "icp_qat_hw.h"
#include <linux/pci.h>

adf_gen2_get_num_accels(struct adf_hw_device_data * self)8 u32 adf_gen2_get_num_accels(struct adf_hw_device_data *self)
9 {
10 	if (!self || !self->accel_mask)
11 		return 0;
12 
13 	return hweight16(self->accel_mask);
14 }
15 EXPORT_SYMBOL_GPL(adf_gen2_get_num_accels);
16 
adf_gen2_get_num_aes(struct adf_hw_device_data * self)17 u32 adf_gen2_get_num_aes(struct adf_hw_device_data *self)
18 {
19 	if (!self || !self->ae_mask)
20 		return 0;
21 
22 	return hweight32(self->ae_mask);
23 }
24 EXPORT_SYMBOL_GPL(adf_gen2_get_num_aes);
25 
adf_gen2_enable_error_correction(struct adf_accel_dev * accel_dev)26 void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev)
27 {
28 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
29 	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
30 	unsigned long accel_mask = hw_data->accel_mask;
31 	unsigned long ae_mask = hw_data->ae_mask;
32 	unsigned int val, i;
33 
34 	/* Enable Accel Engine error detection & correction */
35 	for_each_set_bit(i, &ae_mask, hw_data->num_engines) {
36 		val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_AE_CTX_ENABLES(i));
37 		val |= ADF_GEN2_ENABLE_AE_ECC_ERR;
38 		ADF_CSR_WR(pmisc_addr, ADF_GEN2_AE_CTX_ENABLES(i), val);
39 		val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_AE_MISC_CONTROL(i));
40 		val |= ADF_GEN2_ENABLE_AE_ECC_PARITY_CORR;
41 		ADF_CSR_WR(pmisc_addr, ADF_GEN2_AE_MISC_CONTROL(i), val);
42 	}
43 
44 	/* Enable shared memory error detection & correction */
45 	for_each_set_bit(i, &accel_mask, hw_data->num_accel) {
46 		val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_UERRSSMSH(i));
47 		val |= ADF_GEN2_ERRSSMSH_EN;
48 		ADF_CSR_WR(pmisc_addr, ADF_GEN2_UERRSSMSH(i), val);
49 		val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_CERRSSMSH(i));
50 		val |= ADF_GEN2_ERRSSMSH_EN;
51 		ADF_CSR_WR(pmisc_addr, ADF_GEN2_CERRSSMSH(i), val);
52 	}
53 }
54 EXPORT_SYMBOL_GPL(adf_gen2_enable_error_correction);
55 
/* Set (enable=true) or clear (enable=false) the Valid bit in every
 * AE-thread-to-PCIe-function mapping register, for both register groups.
 *
 * @accel_dev:  accelerator device whose PMISC BAR holds the map registers
 * @enable:     desired state of the Valid bit
 * @num_a_regs: number of Group A map registers to update
 * @num_b_regs: number of Group B map registers to update
 */
void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
			   int num_a_regs, int num_b_regs)
{
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
	u32 val;
	int idx;

	/* Group A registers */
	for (idx = 0; idx < num_a_regs; idx++) {
		val = READ_CSR_AE2FUNCTION_MAP_A(pmisc_addr, idx);
		val = enable ? val | AE2FUNCTION_MAP_VALID :
			       val & ~AE2FUNCTION_MAP_VALID;
		WRITE_CSR_AE2FUNCTION_MAP_A(pmisc_addr, idx, val);
	}

	/* Group B registers */
	for (idx = 0; idx < num_b_regs; idx++) {
		val = READ_CSR_AE2FUNCTION_MAP_B(pmisc_addr, idx);
		val = enable ? val | AE2FUNCTION_MAP_VALID :
			       val & ~AE2FUNCTION_MAP_VALID;
		WRITE_CSR_AE2FUNCTION_MAP_B(pmisc_addr, idx, val);
	}
}
EXPORT_SYMBOL_GPL(adf_gen2_cfg_iov_thds);
84 
adf_gen2_get_admin_info(struct admin_info * admin_csrs_info)85 void adf_gen2_get_admin_info(struct admin_info *admin_csrs_info)
86 {
87 	admin_csrs_info->mailbox_offset = ADF_MAILBOX_BASE_OFFSET;
88 	admin_csrs_info->admin_msg_ur = ADF_ADMINMSGUR_OFFSET;
89 	admin_csrs_info->admin_msg_lr = ADF_ADMINMSGLR_OFFSET;
90 }
91 EXPORT_SYMBOL_GPL(adf_gen2_get_admin_info);
92 
adf_gen2_get_arb_info(struct arb_info * arb_info)93 void adf_gen2_get_arb_info(struct arb_info *arb_info)
94 {
95 	arb_info->arb_cfg = ADF_ARB_CONFIG;
96 	arb_info->arb_offset = ADF_ARB_OFFSET;
97 	arb_info->wt2sam_offset = ADF_ARB_WRK_2_SER_MAP_OFFSET;
98 }
99 EXPORT_SYMBOL_GPL(adf_gen2_get_arb_info);
100 
adf_gen2_enable_ints(struct adf_accel_dev * accel_dev)101 void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev)
102 {
103 	void __iomem *addr = adf_get_pmisc_base(accel_dev);
104 	u32 val;
105 
106 	val = accel_dev->pf.vf_info ? 0 : BIT_ULL(GET_MAX_BANKS(accel_dev)) - 1;
107 
108 	/* Enable bundle and misc interrupts */
109 	ADF_CSR_WR(addr, ADF_GEN2_SMIAPF0_MASK_OFFSET, val);
110 	ADF_CSR_WR(addr, ADF_GEN2_SMIAPF1_MASK_OFFSET, ADF_GEN2_SMIA1_MASK);
111 }
112 EXPORT_SYMBOL_GPL(adf_gen2_enable_ints);
113 
adf_gen2_get_accel_cap(struct adf_accel_dev * accel_dev)114 u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev)
115 {
116 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
117 	struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
118 	u32 straps = hw_data->straps;
119 	u32 fuses = hw_data->fuses;
120 	u32 legfuses;
121 	u32 capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
122 			   ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
123 			   ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
124 			   ICP_ACCEL_CAPABILITIES_CIPHER |
125 			   ICP_ACCEL_CAPABILITIES_COMPRESSION;
126 
127 	/* Read accelerator capabilities mask */
128 	pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, &legfuses);
129 
130 	/* A set bit in legfuses means the feature is OFF in this SKU */
131 	if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) {
132 		capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
133 		capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
134 	}
135 	if (legfuses & ICP_ACCEL_MASK_PKE_SLICE)
136 		capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
137 	if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE) {
138 		capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
139 		capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
140 	}
141 	if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE)
142 		capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
143 
144 	if ((straps | fuses) & ADF_POWERGATE_PKE)
145 		capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
146 
147 	if ((straps | fuses) & ADF_POWERGATE_DC)
148 		capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
149 
150 	return capabilities;
151 }
152 EXPORT_SYMBOL_GPL(adf_gen2_get_accel_cap);
153 
adf_gen2_set_ssm_wdtimer(struct adf_accel_dev * accel_dev)154 void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
155 {
156 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
157 	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
158 	u32 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
159 	u32 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;
160 	unsigned long accel_mask = hw_data->accel_mask;
161 	u32 i = 0;
162 
163 	/* Configures WDT timers */
164 	for_each_set_bit(i, &accel_mask, hw_data->num_accel) {
165 		/* Enable WDT for sym and dc */
166 		ADF_CSR_WR(pmisc_addr, ADF_SSMWDT(i), timer_val);
167 		/* Enable WDT for pke */
168 		ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKE(i), timer_val_pke);
169 	}
170 }
171 EXPORT_SYMBOL_GPL(adf_gen2_set_ssm_wdtimer);
172