// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2024 HiSilicon Limited. */

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/uacce.h>
#include "zip.h"

/* memory */
#define DAE_MEM_START_OFFSET		0x331040
#define DAE_MEM_DONE_OFFSET		0x331044
#define DAE_MEM_START_MASK		0x1
#define DAE_MEM_DONE_MASK		0x1
#define DAE_REG_RD_INTVRL_US		10
#define DAE_REG_RD_TMOUT_US		USEC_PER_SEC

#define DAE_ALG_NAME			"hashagg"

/* error */
#define DAE_AXI_CFG_OFFSET		0x331000
#define DAE_AXI_SHUTDOWN_MASK		(BIT(0) | BIT(5))
#define DAE_ERR_SOURCE_OFFSET		0x331C84
#define DAE_ERR_STATUS_OFFSET		0x331C88
#define DAE_ERR_CE_OFFSET		0x331CA0
#define DAE_ERR_CE_MASK			BIT(3)
#define DAE_ERR_NFE_OFFSET		0x331CA4
#define DAE_ERR_NFE_MASK		0x17
#define DAE_ERR_FE_OFFSET		0x331CA8
#define DAE_ERR_FE_MASK			0
#define DAE_ECC_MBIT_MASK		BIT(2)
#define DAE_ECC_INFO_OFFSET		0x33400C
#define DAE_ERR_SHUTDOWN_OFFSET		0x331CAC
#define DAE_ERR_SHUTDOWN_MASK		0x17
#define DAE_ERR_ENABLE_OFFSET		0x331C80
#define DAE_ERR_ENABLE_MASK		(DAE_ERR_FE_MASK | DAE_ERR_NFE_MASK | DAE_ERR_CE_MASK)
#define DAE_AM_CTRL_GLOBAL_OFFSET	0x330000
#define DAE_AM_RETURN_OFFSET		0x330150
#define DAE_AM_RETURN_MASK		0x3
#define DAE_AXI_SHUTDOWN_EN_MASK	(BIT(0) | BIT(5))

struct hisi_dae_hw_error {
	u32 int_msk;
	const char *msg;
};

static const struct hisi_dae_hw_error dae_hw_error[] = {
	{ .int_msk = BIT(0), .msg = "dae_axi_bus_err" },
	{ .int_msk = BIT(1), .msg = "dae_axi_poison_err" },
	{ .int_msk = BIT(2), .msg = "dae_ecc_2bit_err" },
	{ .int_msk = BIT(3), .msg = "dae_ecc_1bit_err" },
	{ .int_msk = BIT(4), .msg = "dae_fsm_hbeat_err" },
};

static inline bool dae_is_support(struct hisi_qm *qm)
{
	return test_bit(QM_SUPPORT_DAE, &qm->caps);
}

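/*
 * Kick off the DAE internal memory initialization and poll until the
 * hardware reports that it has completed.
 */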
int hisi_dae_set_user_domain(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (!dae_is_support(qm))
		return 0;

	val = readl(qm->io_base + DAE_MEM_START_OFFSET);
	val |= DAE_MEM_START_MASK;
	writel(val, qm->io_base + DAE_MEM_START_OFFSET);
	ret = readl_relaxed_poll_timeout(qm->io_base + DAE_MEM_DONE_OFFSET, val,
					 val & DAE_MEM_DONE_MASK,
					 DAE_REG_RD_INTVRL_US, DAE_REG_RD_TMOUT_US);
	if (ret)
		pci_err(qm->pdev, "failed to init dae memory!\n");

	return ret;
}

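/*
 * Append the DAE algorithm name to the uacce device's algorithm string,
 * separated from any existing entries by a newline.
 */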
int hisi_dae_set_alg(struct hisi_qm *qm)
{
	size_t len;

	if (!dae_is_support(qm))
		return 0;

	if (!qm->uacce)
		return 0;

	len = strlen(qm->uacce->algs);
	/* Reserve one byte for the newline separator that may be appended. */
	if (len + strlen(DAE_ALG_NAME) + 1 >= QM_DEV_ALG_MAX_LEN) {
		pci_err(qm->pdev, "algorithm name is too long!\n");
		return -EINVAL;
	}

	if (len)
		strcat((char *)qm->uacce->algs, "\n");

	strcat((char *)qm->uacce->algs, DAE_ALG_NAME);

	return 0;
}

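/*
 * Enable or disable the automatic AXI master shutdown that is triggered
 * by the error types selected in DAE_ERR_SHUTDOWN_MASK.
 */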
static void hisi_dae_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
{
	u32 axi_val, err_val;

	axi_val = readl(qm->io_base + DAE_AXI_CFG_OFFSET);
	if (enable) {
		axi_val |= DAE_AXI_SHUTDOWN_MASK;
		err_val = DAE_ERR_SHUTDOWN_MASK;
	} else {
		axi_val &= ~DAE_AXI_SHUTDOWN_MASK;
		err_val = 0;
	}

	writel(axi_val, qm->io_base + DAE_AXI_CFG_OFFSET);
	writel(err_val, qm->io_base + DAE_ERR_SHUTDOWN_OFFSET);
}

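/*
 * Clear stale error sources, select which error types are reported as
 * CE/NFE/FE and unmask the DAE error interrupts.
 */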
void hisi_dae_hw_error_enable(struct hisi_qm *qm)
{
	if (!dae_is_support(qm))
		return;

	/* clear any pending dae hw error source */
	writel(DAE_ERR_ENABLE_MASK, qm->io_base + DAE_ERR_SOURCE_OFFSET);

	/* configure error type */
	writel(DAE_ERR_CE_MASK, qm->io_base + DAE_ERR_CE_OFFSET);
	writel(DAE_ERR_NFE_MASK, qm->io_base + DAE_ERR_NFE_OFFSET);
	writel(DAE_ERR_FE_MASK, qm->io_base + DAE_ERR_FE_OFFSET);

	hisi_dae_master_ooo_ctrl(qm, true);

	/* enable dae hw error interrupts */
	writel(DAE_ERR_ENABLE_MASK, qm->io_base + DAE_ERR_ENABLE_OFFSET);
}

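/* Mask all DAE error interrupts and release the AXI shutdown setting. */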
void hisi_dae_hw_error_disable(struct hisi_qm *qm)
{
	if (!dae_is_support(qm))
		return;

	writel(0, qm->io_base + DAE_ERR_ENABLE_OFFSET);
	hisi_dae_master_ooo_ctrl(qm, false);
}

static u32 hisi_dae_get_hw_err_status(struct hisi_qm *qm)
{
	return readl(qm->io_base + DAE_ERR_STATUS_OFFSET);
}

static void hisi_dae_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
{
	if (!dae_is_support(qm))
		return;

	writel(err_sts, qm->io_base + DAE_ERR_SOURCE_OFFSET);
}

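/*
 * Stop reporting the NFE error types that were just observed, so the
 * same error is not reported again before the device is recovered.
 */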
static void hisi_dae_disable_error_report(struct hisi_qm *qm, u32 err_type)
{
	writel(DAE_ERR_NFE_MASK & (~err_type), qm->io_base + DAE_ERR_NFE_OFFSET);
}

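/*
 * Log every asserted error bit; for a multi-bit ECC error, also dump
 * the SRAM ECC information register.
 */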
static void hisi_dae_log_hw_error(struct hisi_qm *qm, u32 err_type)
{
	const struct hisi_dae_hw_error *err;
	struct device *dev = &qm->pdev->dev;
	u32 ecc_info;
	size_t i;

	for (i = 0; i < ARRAY_SIZE(dae_hw_error); i++) {
		err = &dae_hw_error[i];
		if (!(err->int_msk & err_type))
			continue;

		dev_err(dev, "%s [error status=0x%x] found\n",
			err->msg, err->int_msk);

		if (err->int_msk & DAE_ECC_MBIT_MASK) {
			ecc_info = readl(qm->io_base + DAE_ECC_INFO_OFFSET);
			dev_err(dev, "dae multi ecc sram info 0x%x\n", ecc_info);
		}
	}
}

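/*
 * Check the DAE error status and decide whether the error has been
 * recovered or the device needs to be reset.
 */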
enum acc_err_result hisi_dae_get_err_result(struct hisi_qm *qm)
{
	u32 err_status;

	if (!dae_is_support(qm))
		return ACC_ERR_NONE;

	err_status = hisi_dae_get_hw_err_status(qm);
	if (!err_status)
		return ACC_ERR_NONE;

	hisi_dae_log_hw_error(qm, err_status);

	if (err_status & DAE_ERR_NFE_MASK) {
		/* Disable reporting of the same error until the device is recovered. */
		hisi_dae_disable_error_report(qm, err_status);
		return ACC_ERR_NEED_RESET;
	}

	hisi_dae_clear_hw_err_status(qm, err_status);

	return ACC_ERR_RECOVERED;
}

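/* Check whether an unrecovered NFE-class DAE error is currently asserted. */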
bool hisi_dae_dev_is_abnormal(struct hisi_qm *qm)
{
	u32 err_status;

	if (!dae_is_support(qm))
		return false;

	err_status = hisi_dae_get_hw_err_status(qm);

	return !!(err_status & DAE_ERR_NFE_MASK);
}

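/*
 * Shut down the AXI master and poll until the outstanding read/write
 * transactions have returned.
 */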
int hisi_dae_close_axi_master_ooo(struct hisi_qm *qm)
{
	u32 val;
	int ret;

	if (!dae_is_support(qm))
		return 0;

	val = readl(qm->io_base + DAE_AM_CTRL_GLOBAL_OFFSET);
	val |= BIT(0);
	writel(val, qm->io_base + DAE_AM_CTRL_GLOBAL_OFFSET);

	ret = readl_relaxed_poll_timeout(qm->io_base + DAE_AM_RETURN_OFFSET,
					 val, (val == DAE_AM_RETURN_MASK),
					 DAE_REG_RD_INTVRL_US, DAE_REG_RD_TMOUT_US);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to close dae axi ooo!\n");

	return ret;
}

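/*
 * Re-enable the AXI master by pulsing the shutdown-enable bits:
 * clear them, then set them again.
 */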
void hisi_dae_open_axi_master_ooo(struct hisi_qm *qm)
{
	u32 val;

	if (!dae_is_support(qm))
		return;

	val = readl(qm->io_base + DAE_AXI_CFG_OFFSET);

	writel(val & ~DAE_AXI_SHUTDOWN_EN_MASK, qm->io_base + DAE_AXI_CFG_OFFSET);
	writel(val | DAE_AXI_SHUTDOWN_EN_MASK, qm->io_base + DAE_AXI_CFG_OFFSET);
}
263