// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 HiSilicon Limited. */

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/uacce.h>
#include "zip.h"

/* memory */
#define DAE_MEM_START_OFFSET		0x331040
#define DAE_MEM_DONE_OFFSET		0x331044
#define DAE_MEM_START_MASK		0x1
#define DAE_MEM_DONE_MASK		0x1
#define DAE_REG_RD_INTVRL_US		10
#define DAE_REG_RD_TMOUT_US		USEC_PER_SEC

#define DAE_ALG_NAME			"hashagg"
#define DAE_V5_ALG_NAME			"hashagg\nudma\nhashjoin\ngather"

/* error */
#define DAE_AXI_CFG_OFFSET		0x331000
#define DAE_AXI_SHUTDOWN_MASK		(BIT(0) | BIT(5))
#define DAE_ERR_SOURCE_OFFSET		0x331C84
#define DAE_ERR_STATUS_OFFSET		0x331C88
#define DAE_ERR_CE_OFFSET		0x331CA0
#define DAE_ERR_CE_MASK			BIT(3)
#define DAE_ERR_NFE_OFFSET		0x331CA4
#define DAE_ERR_NFE_MASK		0x17
#define DAE_ERR_FE_OFFSET		0x331CA8
#define DAE_ERR_FE_MASK			0
#define DAE_ECC_MBIT_MASK		BIT(2)
#define DAE_ECC_INFO_OFFSET		0x33400C
#define DAE_ERR_SHUTDOWN_OFFSET		0x331CAC
#define DAE_ERR_SHUTDOWN_MASK		0x17
#define DAE_ERR_ENABLE_OFFSET		0x331C80
#define DAE_ERR_ENABLE_MASK		(DAE_ERR_FE_MASK | DAE_ERR_NFE_MASK | DAE_ERR_CE_MASK)
#define DAE_AM_CTRL_GLOBAL_OFFSET	0x330000
#define DAE_AM_RETURN_OFFSET		0x330150
#define DAE_AM_RETURN_MASK		0x3

struct hisi_dae_hw_error {
	u32 int_msk;
	const char *msg;
};

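/* Map each DAE error-source interrupt bit to a human-readable message. */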
static const struct hisi_dae_hw_error dae_hw_error[] = {
	{ .int_msk = BIT(0), .msg = "dae_axi_bus_err" },
	{ .int_msk = BIT(1), .msg = "dae_axi_poison_err" },
	{ .int_msk = BIT(2), .msg = "dae_ecc_2bit_err" },
	{ .int_msk = BIT(3), .msg = "dae_ecc_1bit_err" },
	{ .int_msk = BIT(4), .msg = "dae_fsm_hbeat_err" },
};

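/* DAE support is a per-device capability advertised in the QM caps bitmap. */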
static inline bool dae_is_support(struct hisi_qm *qm)
{
	return test_bit(QM_SUPPORT_DAE, &qm->caps);
}

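/*
 * Kick off DAE internal memory initialization and poll until the hardware
 * reports that it has completed.
 */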
int hisi_dae_set_user_domain(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (!dae_is_support(qm))
		return 0;

	val = readl(qm->io_base + DAE_MEM_START_OFFSET);
	val |= DAE_MEM_START_MASK;
	writel(val, qm->io_base + DAE_MEM_START_OFFSET);
	ret = readl_relaxed_poll_timeout(qm->io_base + DAE_MEM_DONE_OFFSET, val,
					 val & DAE_MEM_DONE_MASK,
					 DAE_REG_RD_INTVRL_US, DAE_REG_RD_TMOUT_US);
	if (ret)
		pci_err(qm->pdev, "failed to init dae memory!\n");

	return ret;
}

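/*
 * Append the DAE algorithm names to the uacce device's algorithm string,
 * separated from any existing entries by a newline.
 */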
int hisi_dae_set_alg(struct hisi_qm *qm)
{
	const char *alg_name;
	size_t len;

	if (!dae_is_support(qm))
		return 0;

	if (!qm->uacce)
		return 0;

	if (qm->ver >= QM_HW_V5)
		alg_name = DAE_V5_ALG_NAME;
	else
		alg_name = DAE_ALG_NAME;

	len = strlen(qm->uacce->algs);
	/* Reserve one extra byte for a possible '\n' separator. */
	if (len + strlen(alg_name) + 1 >= QM_DEV_ALG_MAX_LEN) {
		pci_err(qm->pdev, "algorithm name is too long!\n");
		return -EINVAL;
	}

	if (len)
		strcat((char *)qm->uacce->algs, "\n");

	strcat((char *)qm->uacce->algs, alg_name);

	return 0;
}

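/*
 * Arm or disarm the error-triggered AXI shutdown: when armed, the error
 * types in DAE_ERR_SHUTDOWN_MASK shut down the AXI interface when raised.
 */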
static void hisi_dae_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
{
	u32 axi_val, err_val;

	axi_val = readl(qm->io_base + DAE_AXI_CFG_OFFSET);
	if (enable) {
		axi_val |= DAE_AXI_SHUTDOWN_MASK;
		err_val = DAE_ERR_SHUTDOWN_MASK;
	} else {
		axi_val &= ~DAE_AXI_SHUTDOWN_MASK;
		err_val = 0;
	}

	writel(axi_val, qm->io_base + DAE_AXI_CFG_OFFSET);
	writel(err_val, qm->io_base + DAE_ERR_SHUTDOWN_OFFSET);
}

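/* Enable DAE hardware error detection and interrupt reporting. */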
void hisi_dae_hw_error_enable(struct hisi_qm *qm)
{
	if (!dae_is_support(qm))
		return;

	/* clear any pending dae hw error sources */
	writel(DAE_ERR_ENABLE_MASK, qm->io_base + DAE_ERR_SOURCE_OFFSET);

	/* configure error type */
	writel(DAE_ERR_CE_MASK, qm->io_base + DAE_ERR_CE_OFFSET);
	writel(DAE_ERR_NFE_MASK, qm->io_base + DAE_ERR_NFE_OFFSET);
	writel(DAE_ERR_FE_MASK, qm->io_base + DAE_ERR_FE_OFFSET);

	hisi_dae_master_ooo_ctrl(qm, true);

	/* enable dae hw error interrupts */
	writel(DAE_ERR_ENABLE_MASK, qm->io_base + DAE_ERR_ENABLE_OFFSET);
}

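/* Mask DAE error interrupts and disarm the error-triggered AXI shutdown. */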
void hisi_dae_hw_error_disable(struct hisi_qm *qm)
{
	if (!dae_is_support(qm))
		return;

	writel(0, qm->io_base + DAE_ERR_ENABLE_OFFSET);
	hisi_dae_master_ooo_ctrl(qm, false);
}

static u32 hisi_dae_get_hw_err_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + DAE_ERR_STATUS_OFFSET);
}

static void hisi_dae_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
	if (!dae_is_support(qm))
		return;

	writel(err_sts, qm->io_base + DAE_ERR_SOURCE_OFFSET);
}

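/*
 * Temporarily stop reporting the given NFE error types during recovery;
 * hisi_dae_enable_error_report() restores the full CE/NFE configuration.
 */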
static void hisi_dae_disable_error_report(struct hisi_qm *qm, u32 err_type)
{
	writel(DAE_ERR_NFE_MASK & (~err_type), qm->io_base + DAE_ERR_NFE_OFFSET);
}

static void hisi_dae_enable_error_report(struct hisi_qm *qm)
{
	writel(DAE_ERR_CE_MASK, qm->io_base + DAE_ERR_CE_OFFSET);
	writel(DAE_ERR_NFE_MASK, qm->io_base + DAE_ERR_NFE_OFFSET);
}

static void hisi_dae_log_hw_error(struct hisi_qm *qm, u32 err_type)
{
	const struct hisi_dae_hw_error *err;
	struct device *dev = &qm->pdev->dev;
	u32 ecc_info;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(dae_hw_error); i++) {
		err = &dae_hw_error[i];
		if (!(err->int_msk & err_type))
			continue;

		dev_err(dev, "%s [error status=0x%x] found\n",
			err->msg, err->int_msk);

		if (err->int_msk & DAE_ECC_MBIT_MASK) {
			ecc_info = readl(qm->io_base + DAE_ECC_INFO_OFFSET);
			dev_err(dev, "dae multi ecc sram info 0x%x\n", ecc_info);
		}
	}
}

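/*
 * Check for pending DAE hardware errors and decide whether the device can
 * continue or needs to be reset.
 */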
enum acc_err_result hisi_dae_get_err_result(struct hisi_qm *qm)
{
	u32 err_status;

	if (!dae_is_support(qm))
		return ACC_ERR_NONE;

	err_status = hisi_dae_get_hw_err_status(qm);
	if (!err_status)
		return ACC_ERR_NONE;

	hisi_dae_log_hw_error(qm, err_status);

	if (err_status & DAE_ERR_NFE_MASK) {
		/* Disable reporting of this error until the device is recovered. */
		hisi_dae_disable_error_report(qm, err_status);
		return ACC_ERR_NEED_RESET;
	}

	hisi_dae_clear_hw_err_status(qm, err_status);
	/* The firmware may have disabled error reporting; re-enable it. */
	hisi_dae_enable_error_report(qm);

	return ACC_ERR_RECOVERED;
}

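/* Return true if a DAE error that requires a device reset is pending. */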
bool hisi_dae_dev_is_abnormal(struct hisi_qm *qm)
{
	u32 err_status;

	if (!dae_is_support(qm))
		return false;

	err_status = hisi_dae_get_hw_err_status(qm);

	return !!(err_status & DAE_ERR_NFE_MASK);
}

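/*
 * Close the AXI master out-of-order path and wait for the hardware to
 * acknowledge, so that a device reset can proceed safely.
 */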
int hisi_dae_close_axi_master_ooo(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (!dae_is_support(qm))
		return 0;

	val = readl(qm->io_base + DAE_AM_CTRL_GLOBAL_OFFSET);
	val |= BIT(0);
	writel(val, qm->io_base + DAE_AM_CTRL_GLOBAL_OFFSET);

	ret = readl_relaxed_poll_timeout(qm->io_base + DAE_AM_RETURN_OFFSET,
					 val, (val == DAE_AM_RETURN_MASK),
					 DAE_REG_RD_INTVRL_US, DAE_REG_RD_TMOUT_US);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to close dae axi ooo!\n");

	return ret;
}

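/*
 * Re-open the AXI master out-of-order path after reset by clearing and
 * then re-setting the shutdown enable bits, leaving the error-triggered
 * shutdown armed.
 */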
void hisi_dae_open_axi_master_ooo(struct hisi_qm *qm)
{
	u32 val;

	if (!dae_is_support(qm))
		return;

	val = readl(qm->io_base + DAE_AXI_CFG_OFFSET);

	writel(val & ~DAE_AXI_SHUTDOWN_MASK, qm->io_base + DAE_AXI_CFG_OFFSET);
	writel(val | DAE_AXI_SHUTDOWN_MASK, qm->io_base + DAE_AXI_CFG_OFFSET);
}
278