1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /* Copyright(c) 2007-2022 Intel Corporation */
3 #include "qat_freebsd.h"
4 #include "adf_cfg.h"
5 #include "adf_common_drv.h"
6 #include "adf_accel_devices.h"
7 #include "icp_qat_uclo.h"
8 #include "icp_qat_fw.h"
9 #include "icp_qat_fw_init_admin.h"
10 #include "adf_cfg_strings.h"
11 #include "adf_transport_access_macros.h"
12 #include "adf_transport_internal.h"
13 #include <linux/delay.h>
14 #include "adf_accel_devices.h"
15 #include "adf_common_drv.h"
16 #include "icp_qat_hal.h"
17 #include "icp_qat_uclo.h"
18
/* Address returned by qat_hal_get_reg_addr() for an unknown register type. */
#define BAD_REGADDR 0xffff
/* Upper bound on iterations when busy-waiting on hardware state. */
#define MAX_RETRY_TIMES 1000000
/* Power-on defaults programmed into each AE when it leaves reset. */
#define INIT_CTX_ARB_VALUE 0x0
#define INIT_CTX_ENABLE_VALUE 0x0
#define INIT_PC_VALUE 0x0
#define INIT_WAKEUP_EVENTS_VALUE 0x1
#define INIT_SIG_EVENTS_VALUE 0x1
#define INIT_CCENABLE_VALUE 0x2000
/* Bit offsets of the slice (QAT) and AE fields inside the reset CSRs. */
#define RST_CSR_QAT_LSB 20
#define RST_CSR_AE_LSB 0
/* Timestamp-enable bit in the global MISC_CONTROL CSR. */
#define MC_TIMESTAMP_ENABLE (0x1 << 7)

/*
 * Mask off the write-1-to-clear status bits of CTX_ENABLES so that a
 * read-modify-write sequence does not accidentally acknowledge pending
 * breakpoint/parity events.
 */
#define IGNORE_W1C_MASK \
	((~(1 << CE_BREAKPOINT_BITPOS)) & \
	 (~(1 << CE_CNTL_STORE_PARITY_ERROR_BITPOS)) & \
	 (~(1 << CE_REG_PAR_ERR_BITPOS)))
/* Patch an 8-bit immediate into the GPR-A operand fields of microword @inst. */
#define INSERT_IMMED_GPRA_CONST(inst, const_val) \
	(inst = ((inst & 0xFFFF00C03FFull) | \
		 ((((const_val) << 12) & 0x0FF00000ull) | \
		  (((const_val) << 10) & 0x0003FC00ull))))
/* Patch an 8-bit immediate into the GPR-B operand fields of microword @inst. */
#define INSERT_IMMED_GPRB_CONST(inst, const_val) \
	(inst = ((inst & 0xFFFF00FFF00ull) | \
		 ((((const_val) << 12) & 0x0FF00000ull) | \
		  (((const_val) << 0) & 0x000000FFull))))

/* Per-AE software bookkeeping slot inside the HAL handle. */
#define AE(handle, ae) ((handle)->hal_handle->aes[ae])
45
/*
 * Canned microwords, 4-byte-transfer variant — presumably used by the
 * memory-init paths elsewhere in this file; confirm against the use sites.
 */
static const uint64_t inst_4b[] = { 0x0F0400C0000ull, 0x0F4400C0000ull,
				    0x0F040000300ull, 0x0F440000300ull,
				    0x0FC066C0000ull, 0x0F0000C0300ull,
				    0x0F0000C0300ull, 0x0F0000C0300ull,
				    0x0A021000000ull };
51
/*
 * Canned microcode for pre-gen4 devices, written into the ustore by
 * qat_hal_clear_gpr() to zero every GPR/transfer register of an AE.
 */
static const uint64_t inst[] = {
	0x0F0000C0000ull, 0x0F000000380ull, 0x0D805000011ull, 0x0FC082C0300ull,
	0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
	0x0A0643C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0F0000C0001ull,
	0x0FC066C0001ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
	0x0F000400300ull, 0x0A0610C0000ull, 0x0BAC0000301ull, 0x0D804400101ull,
	0x0A0580C0000ull, 0x0A0581C0000ull, 0x0A0582C0000ull, 0x0A0583C0000ull,
	0x0A0584C0000ull, 0x0A0585C0000ull, 0x0A0586C0000ull, 0x0A0587C0000ull,
	0x0A0588C0000ull, 0x0A0589C0000ull, 0x0A058AC0000ull, 0x0A058BC0000ull,
	0x0A058CC0000ull, 0x0A058DC0000ull, 0x0A058EC0000ull, 0x0A058FC0000ull,
	0x0A05C0C0000ull, 0x0A05C1C0000ull, 0x0A05C2C0000ull, 0x0A05C3C0000ull,
	0x0A05C4C0000ull, 0x0A05C5C0000ull, 0x0A05C6C0000ull, 0x0A05C7C0000ull,
	0x0A05C8C0000ull, 0x0A05C9C0000ull, 0x0A05CAC0000ull, 0x0A05CBC0000ull,
	0x0A05CCC0000ull, 0x0A05CDC0000ull, 0x0A05CEC0000ull, 0x0A05CFC0000ull,
	0x0A0400C0000ull, 0x0B0400C0000ull, 0x0A0401C0000ull, 0x0B0401C0000ull,
	0x0A0402C0000ull, 0x0B0402C0000ull, 0x0A0403C0000ull, 0x0B0403C0000ull,
	0x0A0404C0000ull, 0x0B0404C0000ull, 0x0A0405C0000ull, 0x0B0405C0000ull,
	0x0A0406C0000ull, 0x0B0406C0000ull, 0x0A0407C0000ull, 0x0B0407C0000ull,
	0x0A0408C0000ull, 0x0B0408C0000ull, 0x0A0409C0000ull, 0x0B0409C0000ull,
	0x0A040AC0000ull, 0x0B040AC0000ull, 0x0A040BC0000ull, 0x0B040BC0000ull,
	0x0A040CC0000ull, 0x0B040CC0000ull, 0x0A040DC0000ull, 0x0B040DC0000ull,
	0x0A040EC0000ull, 0x0B040EC0000ull, 0x0A040FC0000ull, 0x0B040FC0000ull,
	0x0D81581C010ull, 0x0E000010000ull, 0x0E000010000ull,
};
76
/*
 * Gen4 (CPM2X) variant of the register-clearing microcode; written once per
 * T-group (see qat_hal_clear_gpr()) because the four AEs share one ustore.
 */
static const uint64_t inst_CPM2X[] = {
	0x0F0000C0000ull, 0x0D802C00011ull, 0x0F0000C0001ull, 0x0FC066C0001ull,
	0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F000500300ull,
	0x0A0610C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0A0580C0000ull,
	0x0A0581C0000ull, 0x0A0582C0000ull, 0x0A0583C0000ull, 0x0A0584C0000ull,
	0x0A0585C0000ull, 0x0A0586C0000ull, 0x0A0587C0000ull, 0x0A0588C0000ull,
	0x0A0589C0000ull, 0x0A058AC0000ull, 0x0A058BC0000ull, 0x0A058CC0000ull,
	0x0A058DC0000ull, 0x0A058EC0000ull, 0x0A058FC0000ull, 0x0A05C0C0000ull,
	0x0A05C1C0000ull, 0x0A05C2C0000ull, 0x0A05C3C0000ull, 0x0A05C4C0000ull,
	0x0A05C5C0000ull, 0x0A05C6C0000ull, 0x0A05C7C0000ull, 0x0A05C8C0000ull,
	0x0A05C9C0000ull, 0x0A05CAC0000ull, 0x0A05CBC0000ull, 0x0A05CCC0000ull,
	0x0A05CDC0000ull, 0x0A05CEC0000ull, 0x0A05CFC0000ull, 0x0A0400C0000ull,
	0x0B0400C0000ull, 0x0A0401C0000ull, 0x0B0401C0000ull, 0x0A0402C0000ull,
	0x0B0402C0000ull, 0x0A0403C0000ull, 0x0B0403C0000ull, 0x0A0404C0000ull,
	0x0B0404C0000ull, 0x0A0405C0000ull, 0x0B0405C0000ull, 0x0A0406C0000ull,
	0x0B0406C0000ull, 0x0A0407C0000ull, 0x0B0407C0000ull, 0x0A0408C0000ull,
	0x0B0408C0000ull, 0x0A0409C0000ull, 0x0B0409C0000ull, 0x0A040AC0000ull,
	0x0B040AC0000ull, 0x0A040BC0000ull, 0x0B040BC0000ull, 0x0A040CC0000ull,
	0x0B040CC0000ull, 0x0A040DC0000ull, 0x0B040DC0000ull, 0x0A040EC0000ull,
	0x0B040EC0000ull, 0x0A040FC0000ull, 0x0B040FC0000ull, 0x0D81341C010ull,
	0x0E000000001ull, 0x0E000010000ull,
};
99
/*
 * Record which contexts of AE @ae are considered live.  Pure software
 * bookkeeping inside the HAL handle — no CSR access.
 */
void
qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
		     unsigned char ae,
		     unsigned int ctx_mask)
{
	AE(handle, ae).live_ctx_mask = ctx_mask;
}
107
108 #define CSR_RETRY_TIMES 500
109 static int
qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned int csr,unsigned int * value)110 qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle,
111 unsigned char ae,
112 unsigned int csr,
113 unsigned int *value)
114 {
115 unsigned int iterations = CSR_RETRY_TIMES;
116
117 do {
118 *value = GET_AE_CSR(handle, ae, csr);
119 if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
120 return 0;
121 } while (iterations--);
122
123 pr_err("QAT: Read CSR timeout\n");
124 return EFAULT;
125 }
126
127 static int
qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned int csr,unsigned int value)128 qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle,
129 unsigned char ae,
130 unsigned int csr,
131 unsigned int value)
132 {
133 unsigned int iterations = CSR_RETRY_TIMES;
134
135 do {
136 SET_AE_CSR(handle, ae, csr, value);
137 if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
138 return 0;
139 } while (iterations--);
140
141 pr_err("QAT: Write CSR Timeout\n");
142 return EFAULT;
143 }
144
/*
 * Read the indirect CTX_WAKEUP_EVENTS CSR of context @ctx on AE @ae into
 * @events.  The context pointer is saved and restored so the caller's
 * currently selected context is left untouched.
 */
static void
qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle *handle,
			 unsigned char ae,
			 unsigned char ctx,
			 unsigned int *events)
{
	unsigned int cur_ctx;

	qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
	qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT, events);
	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
}
158
/*
 * Busy-wait until at least @cycles AE clock cycles have elapsed, measured
 * via the 16-bit PROFILE_COUNT counter.  If @chk_inactive is set, also
 * return early (after a minimum of 8 cycles) once the AE reports no
 * outstanding context (ABO clear); with @chk_inactive == 0 the ABO bit in
 * @csr stays at its initialized value, so the early-out never fires and the
 * full @cycles is always waited.  Returns 0 on success, EFAULT on timeout.
 */
static int
qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle,
		    unsigned char ae,
		    unsigned int cycles,
		    int chk_inactive)
{
	unsigned int base_cnt = 0, cur_cnt = 0;
	unsigned int csr = (1 << ACS_ABO_BITPOS);
	int times = MAX_RETRY_TIMES;
	int elapsed_cycles = 0;

	qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &base_cnt);
	base_cnt &= 0xffff;
	while ((int)cycles > elapsed_cycles && times--) {
		if (chk_inactive)
			qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &csr);

		qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &cur_cnt);
		cur_cnt &= 0xffff;
		elapsed_cycles = cur_cnt - base_cnt;

		/* correct for the 16-bit counter wrapping around */
		if (elapsed_cycles < 0)
			elapsed_cycles += 0x10000;

		/* ensure at least 8 time cycles elapsed in wait_cycles */
		if (elapsed_cycles >= 8 && !(csr & (1 << ACS_ABO_BITPOS)))
			return 0;
	}
	if (times < 0) {
		pr_err("QAT: wait_num_cycles time out\n");
		return EFAULT;
	}
	return 0;
}
193
/*
 * Return the shared-control-store neighbour of @ae.  Engines are paired
 * (0,1), (2,3), ... so the neighbour is found by toggling the lowest bit.
 */
void
qat_hal_get_scs_neigh_ae(unsigned char ae, unsigned char *ae_neigh)
{
	*ae_neigh = ae ^ 0x1;
}
199
/*
 * Pure bit helpers: evaluate their arguments exactly once each and have no
 * side effects.  The shift in SET_BIT is now fully parenthesized — the old
 * expansion relied on '<<' binding tighter than '|', which is fragile when
 * the macro is edited or composed.
 */
#define CLR_BIT(wrd, bit) ((wrd) & ~(1 << (bit)))
#define SET_BIT(wrd, bit) ((wrd) | (1 << (bit)))
202
203 int
qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned char mode)204 qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
205 unsigned char ae,
206 unsigned char mode)
207 {
208 unsigned int csr, new_csr;
209
210 if (mode != 4 && mode != 8) {
211 pr_err("QAT: bad ctx mode=%d\n", mode);
212 return EINVAL;
213 }
214
215 /* Sets the accelaration engine context mode to either four or eight */
216 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
217 csr = IGNORE_W1C_MASK & csr;
218 new_csr = (mode == 4) ? SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) :
219 CLR_BIT(csr, CE_INUSE_CONTEXTS_BITPOS);
220 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
221 return 0;
222 }
223
224 int
qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned char mode)225 qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
226 unsigned char ae,
227 unsigned char mode)
228 {
229 unsigned int csr, new_csr;
230
231 if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
232 pr_err("QAT: No next neigh for CPM2X\n");
233 return EINVAL;
234 }
235
236 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
237 csr &= IGNORE_W1C_MASK;
238
239 new_csr = (mode) ? SET_BIT(csr, CE_NN_MODE_BITPOS) :
240 CLR_BIT(csr, CE_NN_MODE_BITPOS);
241
242 if (new_csr != csr)
243 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
244
245 return 0;
246 }
247
248 int
qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle * handle,unsigned char ae,enum icp_qat_uof_regtype lm_type,unsigned char mode)249 qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
250 unsigned char ae,
251 enum icp_qat_uof_regtype lm_type,
252 unsigned char mode)
253 {
254 unsigned int csr, new_csr;
255
256 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
257 csr &= IGNORE_W1C_MASK;
258 switch (lm_type) {
259 case ICP_LMEM0:
260 new_csr = (mode) ? SET_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS) :
261 CLR_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS);
262 break;
263 case ICP_LMEM1:
264 new_csr = (mode) ? SET_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS) :
265 CLR_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS);
266 break;
267 case ICP_LMEM2:
268 new_csr = (mode) ? SET_BIT(csr, CE_LMADDR_2_GLOBAL_BITPOS) :
269 CLR_BIT(csr, CE_LMADDR_2_GLOBAL_BITPOS);
270 break;
271 case ICP_LMEM3:
272 new_csr = (mode) ? SET_BIT(csr, CE_LMADDR_3_GLOBAL_BITPOS) :
273 CLR_BIT(csr, CE_LMADDR_3_GLOBAL_BITPOS);
274 break;
275 default:
276 pr_err("QAT: lmType = 0x%x\n", lm_type);
277 return EINVAL;
278 }
279
280 if (new_csr != csr)
281 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
282 return 0;
283 }
284
285 void
qat_hal_set_ae_tindex_mode(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned char mode)286 qat_hal_set_ae_tindex_mode(struct icp_qat_fw_loader_handle *handle,
287 unsigned char ae,
288 unsigned char mode)
289 {
290 unsigned int csr, new_csr;
291
292 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
293 csr &= IGNORE_W1C_MASK;
294 new_csr = (mode) ? SET_BIT(csr, CE_T_INDEX_GLOBAL_BITPOS) :
295 CLR_BIT(csr, CE_T_INDEX_GLOBAL_BITPOS);
296 if (new_csr != csr)
297 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
298 }
299
300 void
qat_hal_set_ae_scs_mode(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned char mode)301 qat_hal_set_ae_scs_mode(struct icp_qat_fw_loader_handle *handle,
302 unsigned char ae,
303 unsigned char mode)
304 {
305 unsigned int csr, new_csr;
306
307 qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr);
308 new_csr = (mode) ? SET_BIT(csr, MMC_SHARE_CS_BITPOS) :
309 CLR_BIT(csr, MMC_SHARE_CS_BITPOS);
310 if (new_csr != csr)
311 qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, new_csr);
312 }
313
314 static unsigned short
qat_hal_get_reg_addr(unsigned int type,unsigned short reg_num)315 qat_hal_get_reg_addr(unsigned int type, unsigned short reg_num)
316 {
317 unsigned short reg_addr;
318
319 switch (type) {
320 case ICP_GPA_ABS:
321 case ICP_GPB_ABS:
322 reg_addr = 0x80 | (reg_num & 0x7f);
323 break;
324 case ICP_GPA_REL:
325 case ICP_GPB_REL:
326 reg_addr = reg_num & 0x1f;
327 break;
328 case ICP_SR_RD_REL:
329 case ICP_SR_WR_REL:
330 case ICP_SR_REL:
331 reg_addr = 0x180 | (reg_num & 0x1f);
332 break;
333 case ICP_SR_ABS:
334 reg_addr = 0x140 | ((reg_num & 0x3) << 1);
335 break;
336 case ICP_DR_RD_REL:
337 case ICP_DR_WR_REL:
338 case ICP_DR_REL:
339 reg_addr = 0x1c0 | (reg_num & 0x1f);
340 break;
341 case ICP_DR_ABS:
342 reg_addr = 0x100 | ((reg_num & 0x3) << 1);
343 break;
344 case ICP_NEIGH_REL:
345 reg_addr = 0x280 | (reg_num & 0x1f);
346 break;
347 case ICP_LMEM0:
348 reg_addr = 0x200;
349 break;
350 case ICP_LMEM1:
351 reg_addr = 0x220;
352 break;
353 case ICP_LMEM2:
354 reg_addr = 0x2c0;
355 break;
356 case ICP_LMEM3:
357 reg_addr = 0x2e0;
358 break;
359 case ICP_NO_DEST:
360 reg_addr = 0x300 | (reg_num & 0xff);
361 break;
362 default:
363 reg_addr = BAD_REGADDR;
364 break;
365 }
366 return reg_addr;
367 }
368
369 static u32
qat_hal_get_ae_mask_gen4(struct icp_qat_fw_loader_handle * handle)370 qat_hal_get_ae_mask_gen4(struct icp_qat_fw_loader_handle *handle)
371 {
372 u32 tg = 0, ae;
373 u32 valid_ae_mask = 0;
374
375 for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
376 if (handle->hal_handle->ae_mask & (1 << ae)) {
377 tg = ae / 4;
378 valid_ae_mask |= (1 << (tg * 2));
379 }
380 }
381 return valid_ae_mask;
382 }
383
/*
 * Assert reset for every valid accel engine and service slice.  Gen3 parts
 * have two CPP clusters, each with its own reset CSR; gen4 exposes a single
 * CSR whose AE field is T-group based (see qat_hal_get_ae_mask_gen4()).
 */
void
qat_hal_reset(struct icp_qat_fw_loader_handle *handle)
{
	unsigned int ae_reset_csr[MAX_CPP_NUM];
	unsigned int ae_reset_val[MAX_CPP_NUM];
	unsigned int valid_ae_mask, valid_slice_mask;
	unsigned int cpp_num = 1;
	unsigned int i;

	if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) {
		ae_reset_csr[0] = ICP_RESET_CPP0;
		ae_reset_csr[1] = ICP_RESET_CPP1;
		/* AEs above bit 15 live on the second CPP cluster. */
		if (handle->hal_handle->ae_mask > 0xffff)
			++cpp_num;
	} else if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
		ae_reset_csr[0] = ICP_RESET_CPP0;
	} else {
		ae_reset_csr[0] = ICP_RESET;
	}

	for (i = 0; i < cpp_num; i++) {
		if (i == 0) {
			if (IS_QAT_GEN4(
				pci_get_device(GET_DEV(handle->accel_dev)))) {
				valid_ae_mask =
				    qat_hal_get_ae_mask_gen4(handle);
				valid_slice_mask =
				    handle->hal_handle->slice_mask;
			} else {
				valid_ae_mask =
				    handle->hal_handle->ae_mask & 0xFFFF;
				valid_slice_mask =
				    handle->hal_handle->slice_mask & 0x3F;
			}
		} else {
			/* Second CPP: upper halves of the AE/slice masks. */
			valid_ae_mask =
			    (handle->hal_handle->ae_mask >> AES_PER_CPP) &
			    0xFFFF;
			valid_slice_mask =
			    (handle->hal_handle->slice_mask >> SLICES_PER_CPP) &
			    0x3F;
		}

		/* Setting the mask bits asserts reset for AEs and slices. */
		ae_reset_val[i] = GET_GLB_CSR(handle, ae_reset_csr[i]);
		ae_reset_val[i] |= valid_ae_mask << RST_CSR_AE_LSB;
		ae_reset_val[i] |= valid_slice_mask << RST_CSR_QAT_LSB;
		SET_GLB_CSR(handle, ae_reset_csr[i], ae_reset_val[i]);
	}
}
433
434 static void
qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned int ctx_mask,unsigned int ae_csr,unsigned int csr_val)435 qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle,
436 unsigned char ae,
437 unsigned int ctx_mask,
438 unsigned int ae_csr,
439 unsigned int csr_val)
440 {
441 unsigned int ctx, cur_ctx;
442
443 qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
444
445 for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
446 if (!(ctx_mask & (1 << ctx)))
447 continue;
448 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
449 qat_hal_wr_ae_csr(handle, ae, ae_csr, csr_val);
450 }
451
452 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
453 }
454
/*
 * Read the indirect CSR @ae_csr of context @ctx on AE @ae into @csr_val.
 * The context pointer is saved and restored around the access.
 */
static void
qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle,
		    unsigned char ae,
		    unsigned char ctx,
		    unsigned int ae_csr,
		    unsigned int *csr_val)
{
	unsigned int cur_ctx;

	qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
	qat_hal_rd_ae_csr(handle, ae, ae_csr, csr_val);
	qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
}
469
470 static void
qat_hal_put_sig_event(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned int ctx_mask,unsigned int events)471 qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle,
472 unsigned char ae,
473 unsigned int ctx_mask,
474 unsigned int events)
475 {
476 unsigned int ctx, cur_ctx;
477
478 qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
479 for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
480 if (!(ctx_mask & (1 << ctx)))
481 continue;
482 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
483 qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_INDIRECT, events);
484 }
485 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
486 }
487
488 static void
qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned int ctx_mask,unsigned int events)489 qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle *handle,
490 unsigned char ae,
491 unsigned int ctx_mask,
492 unsigned int events)
493 {
494 unsigned int ctx, cur_ctx;
495
496 qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
497 for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
498 if (!(ctx_mask & (1 << ctx)))
499 continue;
500 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
501 qat_hal_wr_ae_csr(handle,
502 ae,
503 CTX_WAKEUP_EVENTS_INDIRECT,
504 events);
505 }
506 qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
507 }
508
509 static int
qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle * handle)510 qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle)
511 {
512 unsigned int base_cnt, cur_cnt;
513 unsigned char ae;
514 unsigned long ae_mask = handle->hal_handle->ae_mask;
515 int times = MAX_RETRY_TIMES;
516
517 for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
518 {
519 qat_hal_rd_ae_csr(handle,
520 ae,
521 PROFILE_COUNT,
522 (unsigned int *)&base_cnt);
523 base_cnt &= 0xffff;
524
525 do {
526 qat_hal_rd_ae_csr(handle,
527 ae,
528 PROFILE_COUNT,
529 (unsigned int *)&cur_cnt);
530 cur_cnt &= 0xffff;
531 } while (times-- && (cur_cnt == base_cnt));
532
533 if (times < 0) {
534 pr_err("QAT: AE%d is inactive!!\n", ae);
535 return EFAULT;
536 }
537 }
538
539 return 0;
540 }
541
542 int
qat_hal_check_ae_active(struct icp_qat_fw_loader_handle * handle,unsigned int ae)543 qat_hal_check_ae_active(struct icp_qat_fw_loader_handle *handle,
544 unsigned int ae)
545 {
546 unsigned int enable = 0, active = 0;
547
548 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &enable);
549 qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &active);
550 if ((enable & (0xff << CE_ENABLE_BITPOS)) ||
551 (active & (1 << ACS_ABO_BITPOS)))
552 return 1;
553 else
554 return 0;
555 }
556
/*
 * Zero the per-AE timestamp counters: stop the global timestamp clock,
 * clear TIMESTAMP_LOW/HIGH on every enabled AE, then restart the clock.
 * Gen3/gen4 parts use a different MISC_CONTROL CSR location.
 */
static void
qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle)
{
	unsigned int misc_ctl_csr, misc_ctl;
	unsigned char ae;
	unsigned long ae_mask = handle->hal_handle->ae_mask;

	misc_ctl_csr =
	    (IS_QAT_GEN3_OR_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) ?
	    MISC_CONTROL_C4XXX :
	    MISC_CONTROL;
	/* stop the timestamp timers */
	misc_ctl = GET_GLB_CSR(handle, misc_ctl_csr);
	if (misc_ctl & MC_TIMESTAMP_ENABLE)
		SET_GLB_CSR(handle,
			    misc_ctl_csr,
			    misc_ctl & (~MC_TIMESTAMP_ENABLE));

	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
	{
		qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_LOW, 0);
		qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_HIGH, 0);
	}
	/* start timestamp timers */
	SET_GLB_CSR(handle, misc_ctl_csr, misc_ctl | MC_TIMESTAMP_ENABLE);
}
583
#define ESRAM_AUTO_TINIT BIT(2)
#define ESRAM_AUTO_TINIT_DONE BIT(3)
#define ESRAM_AUTO_INIT_USED_CYCLES (1640)
#define ESRAM_AUTO_INIT_CSR_OFFSET 0xC1C

/*
 * Trigger the embedded-SRAM auto-init on DH895xCC parts (a no-op on all
 * other device IDs) and poll until the hardware reports completion.
 * Returns 0 on success or when already initialized, EFAULT on timeout.
 */
static int
qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
{
	uintptr_t csr_addr =
	    ((uintptr_t)handle->hal_ep_csr_addr_v + ESRAM_AUTO_INIT_CSR_OFFSET);
	unsigned int csr_val;
	int times = 30;

	if (pci_get_device(GET_DEV(handle->accel_dev)) !=
	    ADF_DH895XCC_PCI_DEVICE_ID)
		return 0;

	csr_val = ADF_CSR_RD(handle->hal_misc_addr_v, csr_addr);
	if ((csr_val & ESRAM_AUTO_TINIT) && (csr_val & ESRAM_AUTO_TINIT_DONE))
		return 0;
	/* NOTE(review): this second read of the same CSR looks redundant —
	 * csr_val was just read above with no intervening write; confirm
	 * before removing in case the extra read is deliberate. */
	csr_val = ADF_CSR_RD(handle->hal_misc_addr_v, csr_addr);
	csr_val |= ESRAM_AUTO_TINIT;

	ADF_CSR_WR(handle->hal_misc_addr_v, csr_addr, csr_val);
	do {
		/* Give the auto-init engine time to run before re-polling. */
		qat_hal_wait_cycles(handle, 0, ESRAM_AUTO_INIT_USED_CYCLES, 0);
		csr_val = ADF_CSR_RD(handle->hal_misc_addr_v, csr_addr);

	} while (!(csr_val & ESRAM_AUTO_TINIT_DONE) && times--);
	if (times < 0) {
		pr_err("QAT: Fail to init eSram!\n");
		return EFAULT;
	}
	return 0;
}
619
#define SHRAM_INIT_CYCLES 2060
/*
 * Bring the accel engines and service slices out of reset: clear the reset
 * CSR bits (retrying until the hardware acknowledges), re-enable the
 * clocks, verify every AE is executing, then program power-on defaults
 * (PC = 0, all contexts disabled, initial wakeup/signal events) into each
 * valid AE.  Returns 0 on success, EFAULT if the device stays in reset or
 * any later initialization step fails.
 */
int
qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle)
{
	unsigned int ae_reset_csr[MAX_CPP_NUM];
	unsigned int ae_reset_val[MAX_CPP_NUM];
	unsigned int cpp_num = 1;
	unsigned int valid_ae_mask, valid_slice_mask;
	unsigned char ae;
	unsigned int i;
	unsigned int clk_csr[MAX_CPP_NUM];
	unsigned int clk_val[MAX_CPP_NUM];
	unsigned int times = 100;
	unsigned long ae_mask = handle->hal_handle->ae_mask;

	if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) {
		ae_reset_csr[0] = ICP_RESET_CPP0;
		ae_reset_csr[1] = ICP_RESET_CPP1;
		clk_csr[0] = ICP_GLOBAL_CLK_ENABLE_CPP0;
		clk_csr[1] = ICP_GLOBAL_CLK_ENABLE_CPP1;
		/* AEs above bit 15 live on the second CPP cluster. */
		if (handle->hal_handle->ae_mask > 0xffff)
			++cpp_num;
	} else if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
		ae_reset_csr[0] = ICP_RESET_CPP0;
		clk_csr[0] = ICP_GLOBAL_CLK_ENABLE_CPP0;
	} else {
		ae_reset_csr[0] = ICP_RESET;
		clk_csr[0] = ICP_GLOBAL_CLK_ENABLE;
	}

	for (i = 0; i < cpp_num; i++) {
		if (i == 0) {
			if (IS_QAT_GEN4(
				pci_get_device(GET_DEV(handle->accel_dev)))) {
				valid_ae_mask =
				    qat_hal_get_ae_mask_gen4(handle);
				valid_slice_mask =
				    handle->hal_handle->slice_mask;
			} else {
				valid_ae_mask =
				    handle->hal_handle->ae_mask & 0xFFFF;
				valid_slice_mask =
				    handle->hal_handle->slice_mask & 0x3F;
			}
		} else {
			/* Second CPP: upper halves of the AE/slice masks. */
			valid_ae_mask =
			    (handle->hal_handle->ae_mask >> AES_PER_CPP) &
			    0xFFFF;
			valid_slice_mask =
			    (handle->hal_handle->slice_mask >> SLICES_PER_CPP) &
			    0x3F;
		}
		/* write to the reset csr */
		ae_reset_val[i] = GET_GLB_CSR(handle, ae_reset_csr[i]);
		ae_reset_val[i] &= ~(valid_ae_mask << RST_CSR_AE_LSB);
		ae_reset_val[i] &= ~(valid_slice_mask << RST_CSR_QAT_LSB);
		/* Retry until the hardware reports all reset bits cleared. */
		do {
			SET_GLB_CSR(handle, ae_reset_csr[i], ae_reset_val[i]);
			if (!(times--))
				goto out_err;
			ae_reset_val[i] = GET_GLB_CSR(handle, ae_reset_csr[i]);
		} while (
		    (valid_ae_mask | (valid_slice_mask << RST_CSR_QAT_LSB)) &
		    ae_reset_val[i]);
		/* enable clock */
		clk_val[i] = GET_GLB_CSR(handle, clk_csr[i]);
		clk_val[i] |= valid_ae_mask << 0;
		clk_val[i] |= valid_slice_mask << 20;
		SET_GLB_CSR(handle, clk_csr[i], clk_val[i]);
	}
	if (qat_hal_check_ae_alive(handle))
		goto out_err;

	/* Set undefined power-up/reset states to reasonable default values */
	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
	{
		qat_hal_wr_ae_csr(handle,
				  ae,
				  CTX_ENABLES,
				  INIT_CTX_ENABLE_VALUE);
		qat_hal_wr_indr_csr(handle,
				    ae,
				    ICP_QAT_UCLO_AE_ALL_CTX,
				    CTX_STS_INDIRECT,
				    handle->hal_handle->upc_mask &
					INIT_PC_VALUE);
		qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
		qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
		qat_hal_put_wakeup_event(handle,
					 ae,
					 ICP_QAT_UCLO_AE_ALL_CTX,
					 INIT_WAKEUP_EVENTS_VALUE);
		qat_hal_put_sig_event(handle,
				      ae,
				      ICP_QAT_UCLO_AE_ALL_CTX,
				      INIT_SIG_EVENTS_VALUE);
	}
	if (qat_hal_init_esram(handle))
		goto out_err;
	if (qat_hal_wait_cycles(handle, 0, SHRAM_INIT_CYCLES, 0))
		goto out_err;
	qat_hal_reset_timestamp(handle);

	return 0;
out_err:
	pr_err("QAT: failed to get device out of reset\n");
	return EFAULT;
}
728
729 static void
qat_hal_disable_ctx(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned int ctx_mask)730 qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle,
731 unsigned char ae,
732 unsigned int ctx_mask)
733 {
734 unsigned int ctx;
735
736 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
737 ctx &= IGNORE_W1C_MASK &
738 (~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS));
739 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
740 }
741
/*
 * Return the parity (XOR of all 64 bits) of @word: 1 when the number of
 * set bits is odd, 0 when it is even.
 */
static uint64_t
qat_hal_parity_64bit(uint64_t word)
{
	unsigned int shift;

	/* Fold the halves together; bit 0 accumulates the XOR of all bits. */
	for (shift = 32; shift > 0; shift >>= 1)
		word ^= word >> shift;
	return word & 1;
}
753
/*
 * Recompute the seven ECC check bits (bits 44..50) of a 44-bit microword.
 * Each check bit is the parity of the data bits selected by its mask; the
 * masks only cover bits 0..43, so the bits may be computed in any order.
 */
static uint64_t
qat_hal_set_uword_ecc(uint64_t uword)
{
	static const uint64_t ecc_mask[] = {
		0xff800007fffULL, 0x1f801ff801fULL, 0xe387e0781e1ULL,
		0x7cb8e388e22ULL, 0xaf5b2c93244ULL, 0xf56d5525488ULL,
		0xdaf69a46910ULL,
	};
	unsigned int bit;

	/* clear the ecc bits */
	uword &= ~(0x7fULL << 0x2C);
	for (bit = 0; bit < sizeof(ecc_mask) / sizeof(ecc_mask[0]); bit++)
		uword |= qat_hal_parity_64bit(ecc_mask[bit] & uword)
		    << (0x2C + bit);
	return uword;
}
773
/*
 * Write @words_num 64-bit microwords into the micro-store of AE @ae
 * starting at @uaddr; ECC bits are recomputed for every word.  On gen4
 * (CPM2X) the four AEs of a T-group share one ustore, so the write is
 * refused (with an error message) if any sibling AE is still active.
 */
void
qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
		  unsigned char ae,
		  unsigned int uaddr,
		  unsigned int words_num,
		  const uint64_t *uword)
{
	unsigned int ustore_addr;
	unsigned int i, ae_in_group;

	if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
		/* First AE of the 4-engine T-group containing @ae. */
		ae_in_group = ae / 4 * 4;

		for (i = 0; i < AE_TG_NUM_CPM2X; i++) {
			if (ae_in_group + i == ae)
				continue;
			if (ae_in_group + i >= handle->hal_handle->ae_max_num)
				break;
			if (qat_hal_check_ae_active(handle, ae_in_group + i)) {
				pr_err(
				    "ae%d in T_group is active, cannot write to ustore!\n",
				    ae_in_group + i);
				return;
			}
		}
	}

	/* Save the ustore address so it can be restored afterwards. */
	qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
	/* UA_ECS presumably selects auto-increment/ECC store mode on the
	 * address register — TODO confirm against the CSR definition. */
	uaddr |= UA_ECS;
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
	for (i = 0; i < words_num; i++) {
		unsigned int uwrd_lo, uwrd_hi;
		uint64_t tmp;

		tmp = qat_hal_set_uword_ecc(uword[i]);
		uwrd_lo = (unsigned int)(tmp & 0xffffffff);
		uwrd_hi = (unsigned int)(tmp >> 0x20);
		qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
		qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
	}
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
}
816
817 void
qat_hal_wr_coalesce_uwords(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned int uaddr,unsigned int words_num,u64 * uword)818 qat_hal_wr_coalesce_uwords(struct icp_qat_fw_loader_handle *handle,
819 unsigned char ae,
820 unsigned int uaddr,
821 unsigned int words_num,
822 u64 *uword)
823 {
824 u64 *even_uwrods, *odd_uwords;
825 unsigned char neigh_ae, odd_ae, even_ae;
826 int i, even_cpy_cnt = 0, odd_cpy_cnt = 0;
827
828 even_uwrods =
829 malloc(16 * 1024 * sizeof(*uword), M_QAT, M_WAITOK | M_ZERO);
830 odd_uwords =
831 malloc(16 * 1024 * sizeof(*uword), M_QAT, M_WAITOK | M_ZERO);
832 qat_hal_get_scs_neigh_ae(ae, &neigh_ae);
833 if (ae & 1) {
834 odd_ae = ae;
835 even_ae = neigh_ae;
836 } else {
837 odd_ae = neigh_ae;
838 even_ae = ae;
839 }
840 for (i = 0; i < words_num; i++) {
841 if ((uaddr + i) & 1)
842 odd_uwords[odd_cpy_cnt++] = uword[i];
843 else
844 even_uwrods[even_cpy_cnt++] = uword[i];
845 }
846 if (even_cpy_cnt)
847 qat_hal_wr_uwords(handle,
848 even_ae,
849 (uaddr + 1) / 2,
850 even_cpy_cnt,
851 even_uwrods);
852 if (odd_cpy_cnt)
853 qat_hal_wr_uwords(
854 handle, odd_ae, uaddr / 2, odd_cpy_cnt, odd_uwords);
855 free(even_uwrods, M_QAT);
856 free(odd_uwords, M_QAT);
857 }
858
859 static void
qat_hal_enable_ctx(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned int ctx_mask)860 qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle,
861 unsigned char ae,
862 unsigned int ctx_mask)
863 {
864 unsigned int ctx;
865
866 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
867 ctx &= IGNORE_W1C_MASK;
868 ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF;
869 ctx |= (ctx_mask << CE_ENABLE_BITPOS);
870 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
871 }
872
873 static void
qat_hal_clear_xfer(struct icp_qat_fw_loader_handle * handle)874 qat_hal_clear_xfer(struct icp_qat_fw_loader_handle *handle)
875 {
876 unsigned char ae;
877 unsigned short reg;
878 unsigned long ae_mask = handle->hal_handle->ae_mask;
879
880 for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
881 {
882 for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) {
883 qat_hal_init_rd_xfer(
884 handle, ae, 0, ICP_SR_RD_ABS, reg, 0);
885 qat_hal_init_rd_xfer(
886 handle, ae, 0, ICP_DR_RD_ABS, reg, 0);
887 }
888 }
889 }
890
/*
 * Zero the GPR banks of every enabled AE by loading and running a small
 * register-clearing microprogram, then restore each AE's pre-existing
 * state and power-on defaults.  Returns 0 on success or EINVAL if any AE
 * fails to finish within the retry budget.
 *
 * NOTE(review): scs_flag and savctx are overwritten on every iteration of
 * the first loop, so the second loop restores *all* AEs using the values
 * captured from the *last* AE only.  That is only correct if every AE has
 * identical SCS/active-context state at this point — confirm intent.
 */
static int
qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
{
	unsigned char ae;
	unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX;
	int times = MAX_RETRY_TIMES;
	unsigned int csr_val = 0;
	unsigned int savctx = 0;
	unsigned int scs_flag = 0;
	unsigned long ae_mask = handle->hal_handle->ae_mask;
	int ret = 0;

	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
	{
		/* Temporarily disable shared control store for the write. */
		qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
		scs_flag = csr_val & (1 << MMC_SHARE_CS_BITPOS);
		csr_val &= ~(1 << MMC_SHARE_CS_BITPOS);
		qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val);
		qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr_val);
		csr_val &= IGNORE_W1C_MASK;
		/* Gen4 has no next-neighbour mode (see set_ae_nn_mode). */
		if (!IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
			csr_val |= CE_NN_MODE;
		}
		qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val);

		/* Load the clearing microprogram: on gen4 the four AEs of a
		 * T-group share one ustore, so write it once per group. */
		if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
			if (ae % 4 == 0)
				qat_hal_wr_uwords(handle,
						  ae,
						  0,
						  ARRAY_SIZE(inst_CPM2X),
						  (const uint64_t *)inst_CPM2X);
		} else {
			qat_hal_wr_uwords(handle,
					  ae,
					  0,
					  ARRAY_SIZE(inst),
					  (const uint64_t *)inst);
		}
		/* Point every context's PC at 0 and start all contexts. */
		qat_hal_wr_indr_csr(handle,
				    ae,
				    ctx_mask,
				    CTX_STS_INDIRECT,
				    handle->hal_handle->upc_mask &
					INIT_PC_VALUE);
		qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
		qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0);
		qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY);
		qat_hal_wr_indr_csr(
		    handle, ae, ctx_mask, CTX_SIG_EVENTS_INDIRECT, 0);
		qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
		qat_hal_enable_ctx(handle, ae, ctx_mask);
	}

	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
	{
		/* wait for AE to finish */
		do {
			ret = qat_hal_wait_cycles(handle, ae, 20, 1);
		} while (ret && times--);

		if (times < 0) {
			pr_err("QAT: clear GPR of AE %d failed", ae);
			return EINVAL;
		}
		/* Restore pre-existing state and power-on defaults. */
		qat_hal_disable_ctx(handle, ae, ctx_mask);
		qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
		if (scs_flag)
			csr_val |= (1 << MMC_SHARE_CS_BITPOS);
		qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val);
		qat_hal_wr_ae_csr(handle,
				  ae,
				  ACTIVE_CTX_STATUS,
				  savctx & ACS_ACNO);
		qat_hal_wr_ae_csr(handle,
				  ae,
				  CTX_ENABLES,
				  INIT_CTX_ENABLE_VALUE);
		qat_hal_wr_indr_csr(handle,
				    ae,
				    ctx_mask,
				    CTX_STS_INDIRECT,
				    handle->hal_handle->upc_mask &
					INIT_PC_VALUE);
		qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
		qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
		qat_hal_put_wakeup_event(handle,
					 ae,
					 ctx_mask,
					 INIT_WAKEUP_EVENTS_VALUE);
		qat_hal_put_sig_event(handle,
				      ae,
				      ctx_mask,
				      INIT_SIG_EVENTS_VALUE);
	}
	return 0;
}
988
989 static int
qat_hal_check_imr(struct icp_qat_fw_loader_handle * handle)990 qat_hal_check_imr(struct icp_qat_fw_loader_handle *handle)
991 {
992 device_t dev = accel_to_pci_dev(handle->accel_dev);
993 u8 reg_val = 0;
994
995 if (pci_get_device(GET_DEV(handle->accel_dev)) !=
996 ADF_C3XXX_PCI_DEVICE_ID &&
997 pci_get_device(GET_DEV(handle->accel_dev)) !=
998 ADF_200XX_PCI_DEVICE_ID)
999 return 0;
1000
1001 reg_val = pci_read_config(dev, 0x04, 1);
1002 /*
1003 * PCI command register memory bit and rambaseaddr_lo address
1004 * are checked to confirm IMR2 is enabled in BIOS settings
1005 */
1006 if ((reg_val & 0x2) && GET_FCU_CSR(handle, FCU_RAMBASE_ADDR_LO))
1007 return 0;
1008
1009 return EINVAL;
1010 }
1011
/*
 * Allocate and populate the firmware-loader handle for an accel device:
 * record the misc (and, where the device has one, SRAM) BAR mappings,
 * derive the per-generation CSR offsets, describe every enabled AE,
 * bring the AEs out of reset and enable the ALU_OUT signature CSR.
 *
 * Returns 0 on success and stores the handle in
 * accel_dev->fw_loader->fw_loader; returns EFAULT for a NULL device,
 * EINVAL when IMR2 is required but not enabled in BIOS, or EIO when the
 * AE reset/clear sequence fails.
 */
int
qat_hal_init(struct adf_accel_dev *accel_dev)
{
	unsigned char ae;
	unsigned int cap_offset, ae_offset, ep_offset;
	unsigned int sram_offset = 0;
	unsigned int max_en_ae_id = 0;
	int ret = 0;
	unsigned long ae_mask;
	struct icp_qat_fw_loader_handle *handle;
	if (!accel_dev) {
		return EFAULT;
	}
	struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct adf_bar *misc_bar =
	    &pci_info->pci_bars[hw_data->get_misc_bar_id(hw_data)];
	struct adf_bar *sram_bar;

	/* M_WAITOK allocation cannot return NULL, so no check is needed */
	handle = malloc(sizeof(*handle), M_QAT, M_WAITOK | M_ZERO);

	handle->hal_misc_addr_v = misc_bar->virt_addr;
	handle->accel_dev = accel_dev;
	/* only DH895xCC and GEN3 devices expose an SRAM BAR */
	if (pci_get_device(GET_DEV(handle->accel_dev)) ==
		ADF_DH895XCC_PCI_DEVICE_ID ||
	    IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) {
		sram_bar =
		    &pci_info->pci_bars[hw_data->get_sram_bar_id(hw_data)];
		/* GEN3: MMP region lives past a fixed 4 MB offset in ARAM */
		if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev))))
			sram_offset =
			    0x400000 + accel_dev->aram_info->mmp_region_offset;
		handle->hal_sram_addr_v = sram_bar->virt_addr;
		handle->hal_sram_offset = sram_offset;
		handle->hal_sram_size = sram_bar->size;
	}
	/* per-generation CSR offsets for cap, AE-xfer and EP regions */
	GET_CSR_OFFSET(pci_get_device(GET_DEV(handle->accel_dev)),
		       cap_offset,
		       ae_offset,
		       ep_offset);
	handle->hal_cap_g_ctl_csr_addr_v = cap_offset;
	handle->hal_cap_ae_xfer_csr_addr_v = ae_offset;
	handle->hal_ep_csr_addr_v = ep_offset;
	handle->hal_cap_ae_local_csr_addr_v =
	    ((uintptr_t)handle->hal_cap_ae_xfer_csr_addr_v +
	     LOCAL_TO_XFER_REG_OFFSET);
	/* every device except DH895xCC uses authenticated firmware */
	handle->fw_auth = (pci_get_device(GET_DEV(handle->accel_dev)) ==
			   ADF_DH895XCC_PCI_DEVICE_ID) ?
	    false :
	    true;
	if (handle->fw_auth && qat_hal_check_imr(handle)) {
		device_printf(GET_DEV(accel_dev), "IMR2 not enabled in BIOS\n");
		ret = EINVAL;
		goto out_hal_handle;
	}

	handle->hal_handle =
	    malloc(sizeof(*handle->hal_handle), M_QAT, M_WAITOK | M_ZERO);
	handle->hal_handle->revision_id = accel_dev->accel_pci_dev.revid;
	handle->hal_handle->ae_mask = hw_data->ae_mask;
	handle->hal_handle->admin_ae_mask = hw_data->admin_ae_mask;
	handle->hal_handle->slice_mask = hw_data->accel_mask;
	handle->cfg_ae_mask = 0xFFFFFFFF;
	/* create AE objects */
	if (IS_QAT_GEN3(pci_get_device(GET_DEV(handle->accel_dev)))) {
		/* GEN3: 16-bit PC, 8K microwords of ustore */
		handle->hal_handle->upc_mask = 0xffff;
		handle->hal_handle->max_ustore = 0x2000;
	} else {
		handle->hal_handle->upc_mask = 0x1ffff;
		handle->hal_handle->max_ustore = 0x4000;
	}

	ae_mask = hw_data->ae_mask;

	/* mark each enabled AE's ustore as fully free with all ctx live */
	for_each_set_bit(ae, &ae_mask, ICP_QAT_UCLO_MAX_AE)
	{
		handle->hal_handle->aes[ae].free_addr = 0;
		handle->hal_handle->aes[ae].free_size =
		    handle->hal_handle->max_ustore;
		handle->hal_handle->aes[ae].ustore_size =
		    handle->hal_handle->max_ustore;
		handle->hal_handle->aes[ae].live_ctx_mask =
		    ICP_QAT_UCLO_AE_ALL_CTX;
		max_en_ae_id = ae;
	}
	handle->hal_handle->ae_max_num = max_en_ae_id + 1;
	/* take all AEs out of reset */
	if (qat_hal_clr_reset(handle)) {
		device_printf(GET_DEV(accel_dev), "qat_hal_clr_reset error\n");
		ret = EIO;
		goto out_err;
	}
	qat_hal_clear_xfer(handle);
	/* with authenticated firmware the FCU clears GPRs itself */
	if (!handle->fw_auth) {
		if (qat_hal_clear_gpr(handle)) {
			ret = EIO;
			goto out_err;
		}
	}

	/* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */
	for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
	{
		unsigned int csr_val = 0;

		qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE, &csr_val);
		csr_val |= 0x1;
		qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val);
	}
	accel_dev->fw_loader->fw_loader = handle;
	return 0;

out_err:
	free(handle->hal_handle, M_QAT);
out_hal_handle:
	free(handle, M_QAT);
	return ret;
}
1129
1130 void
qat_hal_deinit(struct icp_qat_fw_loader_handle * handle)1131 qat_hal_deinit(struct icp_qat_fw_loader_handle *handle)
1132 {
1133 if (!handle)
1134 return;
1135 free(handle->hal_handle, M_QAT);
1136 free(handle, M_QAT);
1137 }
1138
/*
 * Start the acceleration engines. With authenticated firmware the FCU
 * is issued a START command and polled until its DONE bit is set;
 * otherwise each enabled AE is sent a wakeup event and has all of its
 * contexts enabled directly.
 *
 * Returns the number of enabled AEs on success, or 0 when the FCU
 * start command times out.
 */
int
qat_hal_start(struct icp_qat_fw_loader_handle *handle)
{
	unsigned char ae = 0;
	int retry = 0;
	unsigned int fcu_sts = 0;
	unsigned int fcu_ctl_csr, fcu_sts_csr;
	unsigned long ae_mask = handle->hal_handle->ae_mask;
	u32 ae_ctr = 0;

	if (handle->fw_auth) {
		/* count enabled AEs up front; this is the success value */
		for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
		{
			ae_ctr++;
		}
		/* FCU CSR addresses moved on GEN3/GEN4 parts */
		if (IS_QAT_GEN3_OR_GEN4(
			pci_get_device(GET_DEV(handle->accel_dev)))) {
			fcu_ctl_csr = FCU_CONTROL_C4XXX;
			fcu_sts_csr = FCU_STATUS_C4XXX;

		} else {
			fcu_ctl_csr = FCU_CONTROL;
			fcu_sts_csr = FCU_STATUS;
		}
		SET_FCU_CSR(handle, fcu_ctl_csr, FCU_CTRL_CMD_START);
		/* poll the FCU status DONE bit with a bounded retry count */
		do {
			pause_ms("adfstop", FW_AUTH_WAIT_PERIOD);
			fcu_sts = GET_FCU_CSR(handle, fcu_sts_csr);
			if (((fcu_sts >> FCU_STS_DONE_POS) & 0x1))
				return ae_ctr;
		} while (retry++ < FW_AUTH_MAX_RETRY);
		pr_err("QAT: start error (AE 0x%x FCU_STS = 0x%x)\n",
		       ae,
		       fcu_sts);
		return 0;
	} else {
		for_each_set_bit(ae, &ae_mask, handle->hal_handle->ae_max_num)
		{
			/* the wakeup-event value differs on GEN4 devices */
			qat_hal_put_wakeup_event(handle,
						 ae,
						 0,
						 IS_QAT_GEN4(
						     pci_get_device(GET_DEV(
							 handle->accel_dev))) ?
						     0x80000000 :
						     0x10000);
			qat_hal_enable_ctx(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX);
			ae_ctr++;
		}
		return ae_ctr;
	}
}
1191
1192 void
qat_hal_stop(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned int ctx_mask)1193 qat_hal_stop(struct icp_qat_fw_loader_handle *handle,
1194 unsigned char ae,
1195 unsigned int ctx_mask)
1196 {
1197 if (!handle->fw_auth)
1198 qat_hal_disable_ctx(handle, ae, ctx_mask);
1199 }
1200
1201 void
qat_hal_set_pc(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned int ctx_mask,unsigned int upc)1202 qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
1203 unsigned char ae,
1204 unsigned int ctx_mask,
1205 unsigned int upc)
1206 {
1207 qat_hal_wr_indr_csr(handle,
1208 ae,
1209 ctx_mask,
1210 CTX_STS_INDIRECT,
1211 handle->hal_handle->upc_mask & upc);
1212 }
1213
/*
 * Read words_num 64-bit microwords from an AE's control store starting
 * at uaddr into uword[]. Shared control store is disabled for the
 * duration of the read; the previous AE_MISC_CONTROL and
 * USTORE_ADDRESS values are restored before returning.
 */
static void
qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle,
		   unsigned char ae,
		   unsigned int uaddr,
		   unsigned int words_num,
		   uint64_t *uword)
{
	unsigned int i, uwrd_lo, uwrd_hi;
	unsigned int ustore_addr, misc_control;
	unsigned int scs_flag = 0;

	qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &misc_control);
	scs_flag = misc_control & (0x1 << MMC_SHARE_CS_BITPOS);
	/* disable scs (shared control store) while reading */
	qat_hal_wr_ae_csr(handle,
			  ae,
			  AE_MISC_CONTROL,
			  misc_control & 0xfffffffb);
	qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
	/* set the ECS bit in the ustore address for every access */
	uaddr |= UA_ECS;
	for (i = 0; i < words_num; i++) {
		qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
		uaddr++;
		qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER, &uwrd_lo);
		qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER, &uwrd_hi);
		/* combine the two 32-bit halves into one microword */
		uword[i] = uwrd_hi;
		uword[i] = (uword[i] << 0x20) | uwrd_lo;
	}
	/* restore the saved shared-control-store setting */
	if (scs_flag)
		misc_control |= (0x1 << MMC_SHARE_CS_BITPOS);
	qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, misc_control);
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
}
1247
/*
 * Write words_num 32-bit values into an AE's umem through the control
 * store interface. Each value is scattered into the USTORE_DATA
 * lower/upper format the hardware expects, with parity bits computed
 * from the two 16-bit halves of the value. The previous USTORE_ADDRESS
 * is restored afterwards.
 */
void
qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle,
		unsigned char ae,
		unsigned int uaddr,
		unsigned int words_num,
		unsigned int *data)
{
	unsigned int i, ustore_addr;

	qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
	/* set the ECS bit; the address auto-increments on each write */
	uaddr |= UA_ECS;
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
	for (i = 0; i < words_num; i++) {
		unsigned int uwrd_lo, uwrd_hi, tmp;

		/* scatter data bits into the hardware's field layout */
		uwrd_lo = ((data[i] & 0xfff0000) << 4) | (0x3 << 18) |
		    ((data[i] & 0xff00) << 2) | (0x3 << 8) | (data[i] & 0xff);
		uwrd_hi = (0xf << 4) | ((data[i] & 0xf0000000) >> 28);
		/* bits 8/9: parity of the low and high 16-bit halves */
		uwrd_hi |= (bitcount32(data[i] & 0xffff) & 0x1) << 8;
		tmp = ((data[i] >> 0x10) & 0xffff);
		uwrd_hi |= (bitcount32(tmp) & 0x1) << 9;
		qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
		qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
	}
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
}
1274
#define MAX_EXEC_INST 100
/*
 * Execute an ad-hoc microcode sequence on one context of an AE and
 * optionally return the final PC through *endpc. All context state the
 * sequence could disturb (LM address registers, byte indexes, T-index,
 * PC, wakeup/signal events, CC enable, arbiter control and — when
 * inst_num <= MAX_EXEC_INST — the overwritten microwords) is saved
 * first and restored after execution.
 *
 * Returns 0 on success, EINVAL for a bad instruction count and EFAULT
 * if the microcode does not finish within max_cycle cycles.
 *
 * NOTE(review): the EFAULT path returns before any of the saved state
 * is restored — confirm callers treat that as fatal for the AE.
 */
static int
qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
			unsigned char ae,
			unsigned char ctx,
			uint64_t *micro_inst,
			unsigned int inst_num,
			int code_off,
			unsigned int max_cycle,
			unsigned int *endpc)
{
	uint64_t savuwords[MAX_EXEC_INST];
	unsigned int ind_lm_addr0, ind_lm_addr1;
	unsigned int ind_lm_addr2, ind_lm_addr3;
	unsigned int ind_lm_addr_byte0, ind_lm_addr_byte1;
	unsigned int ind_lm_addr_byte2, ind_lm_addr_byte3;
	unsigned int ind_t_index, ind_t_index_byte;
	unsigned int ind_cnt_sig;
	unsigned int ind_sig, act_sig;
	unsigned int csr_val = 0, newcsr_val;
	unsigned int savctx, scs_flag;
	unsigned int savcc, wakeup_events, savpc;
	unsigned int ctxarb_ctl, ctx_enables;

	if (inst_num > handle->hal_handle->max_ustore || !micro_inst) {
		pr_err("QAT: invalid instruction num %d\n", inst_num);
		return EINVAL;
	}
	/* save current context */
	qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT, &ind_lm_addr0);
	qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT, &ind_lm_addr1);
	qat_hal_rd_indr_csr(
	    handle, ae, ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX, &ind_lm_addr_byte0);
	qat_hal_rd_indr_csr(
	    handle, ae, ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX, &ind_lm_addr_byte1);
	/* GEN3/GEN4 add LM addresses 2/3 and the T-index registers */
	if (IS_QAT_GEN3_OR_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
		qat_hal_rd_indr_csr(
		    handle, ae, ctx, LM_ADDR_2_INDIRECT, &ind_lm_addr2);
		qat_hal_rd_indr_csr(
		    handle, ae, ctx, LM_ADDR_3_INDIRECT, &ind_lm_addr3);
		qat_hal_rd_indr_csr(handle,
				    ae,
				    ctx,
				    INDIRECT_LM_ADDR_2_BYTE_INDEX,
				    &ind_lm_addr_byte2);
		qat_hal_rd_indr_csr(handle,
				    ae,
				    ctx,
				    INDIRECT_LM_ADDR_3_BYTE_INDEX,
				    &ind_lm_addr_byte3);
		qat_hal_rd_indr_csr(
		    handle, ae, ctx, INDIRECT_T_INDEX, &ind_t_index);
		qat_hal_rd_indr_csr(handle,
				    ae,
				    ctx,
				    INDIRECT_T_INDEX_BYTE_INDEX,
				    &ind_t_index_byte);
	}
	/* disable shared control store while the ustore is rewritten */
	qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
	scs_flag = csr_val & (1 << MMC_SHARE_CS_BITPOS);
	newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
	qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
	/* save the microwords that will be overwritten, if they fit */
	if (inst_num <= MAX_EXEC_INST)
		qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords);
	qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events);
	qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT, &savpc);
	savpc = (savpc & handle->hal_handle->upc_mask) >> 0;
	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
	ctx_enables &= IGNORE_W1C_MASK;
	qat_hal_rd_ae_csr(handle, ae, CC_ENABLE, &savcc);
	qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
	qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_ctl);
	qat_hal_rd_indr_csr(
	    handle, ae, ctx, FUTURE_COUNT_SIGNAL_INDIRECT, &ind_cnt_sig);
	qat_hal_rd_indr_csr(handle, ae, ctx, CTX_SIG_EVENTS_INDIRECT, &ind_sig);
	qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, &act_sig);
	/* execute micro codes */
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
	qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT, 0);
	qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, ctx & ACS_ACNO);
	if (code_off)
		qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc & 0xffffdfff);
	qat_hal_put_wakeup_event(handle, ae, (1 << ctx), XCWE_VOLUNTARY);
	qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_SIG_EVENTS_INDIRECT, 0);
	qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
	qat_hal_enable_ctx(handle, ae, (1 << ctx));
	/* wait for micro codes to finish */
	if (qat_hal_wait_cycles(handle, ae, max_cycle, 1) != 0)
		return EFAULT;
	if (endpc) {
		unsigned int ctx_status;

		qat_hal_rd_indr_csr(
		    handle, ae, ctx, CTX_STS_INDIRECT, &ctx_status);
		*endpc = ctx_status & handle->hal_handle->upc_mask;
	}
	/* retore to saved context */
	qat_hal_disable_ctx(handle, ae, (1 << ctx));
	if (inst_num <= MAX_EXEC_INST)
		qat_hal_wr_uwords(handle, ae, 0, inst_num, savuwords);
	qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events);
	qat_hal_wr_indr_csr(handle,
			    ae,
			    (1 << ctx),
			    CTX_STS_INDIRECT,
			    handle->hal_handle->upc_mask & savpc);
	qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
	newcsr_val = scs_flag ? SET_BIT(csr_val, MMC_SHARE_CS_BITPOS) :
				CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
	qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
	qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc);
	qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, savctx & ACS_ACNO);
	qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_ctl);
	qat_hal_wr_indr_csr(
	    handle, ae, (1 << ctx), LM_ADDR_0_INDIRECT, ind_lm_addr0);
	qat_hal_wr_indr_csr(
	    handle, ae, (1 << ctx), LM_ADDR_1_INDIRECT, ind_lm_addr1);
	qat_hal_wr_indr_csr(handle,
			    ae,
			    (1 << ctx),
			    INDIRECT_LM_ADDR_0_BYTE_INDEX,
			    ind_lm_addr_byte0);
	qat_hal_wr_indr_csr(handle,
			    ae,
			    (1 << ctx),
			    INDIRECT_LM_ADDR_1_BYTE_INDEX,
			    ind_lm_addr_byte1);
	if (IS_QAT_GEN3_OR_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
		qat_hal_wr_indr_csr(
		    handle, ae, (1 << ctx), LM_ADDR_2_INDIRECT, ind_lm_addr2);
		qat_hal_wr_indr_csr(
		    handle, ae, (1 << ctx), LM_ADDR_3_INDIRECT, ind_lm_addr3);
		qat_hal_wr_indr_csr(handle,
				    ae,
				    (1 << ctx),
				    INDIRECT_LM_ADDR_2_BYTE_INDEX,
				    ind_lm_addr_byte2);
		qat_hal_wr_indr_csr(handle,
				    ae,
				    (1 << ctx),
				    INDIRECT_LM_ADDR_3_BYTE_INDEX,
				    ind_lm_addr_byte3);
		qat_hal_wr_indr_csr(
		    handle, ae, (1 << ctx), INDIRECT_T_INDEX, ind_t_index);
		qat_hal_wr_indr_csr(handle,
				    ae,
				    (1 << ctx),
				    INDIRECT_T_INDEX_BYTE_INDEX,
				    ind_t_index_byte);
	}
	qat_hal_wr_indr_csr(
	    handle, ae, (1 << ctx), FUTURE_COUNT_SIGNAL_INDIRECT, ind_cnt_sig);
	qat_hal_wr_indr_csr(
	    handle, ae, (1 << ctx), CTX_SIG_EVENTS_INDIRECT, ind_sig);
	qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, act_sig);
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);

	return 0;
}
1435
/*
 * Read a context-relative register by injecting a single ALU
 * instruction at ustore address 0 and sampling the ALU_OUT CSR. The
 * overwritten microword, active context, arbiter control and
 * AE_MISC_CONTROL are all restored before returning.
 *
 * Returns 0 on success or EINVAL for an unmappable register address.
 */
static int
qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
		   unsigned char ae,
		   unsigned char ctx,
		   enum icp_qat_uof_regtype reg_type,
		   unsigned short reg_num,
		   unsigned int *data)
{
	unsigned int savctx, uaddr, uwrd_lo, uwrd_hi;
	unsigned int ctxarb_cntl, ustore_addr, ctx_enables;
	unsigned short reg_addr;
	int status = 0;
	unsigned int scs_flag = 0;
	unsigned int csr_val = 0, newcsr_val = 0;
	u64 insts, savuword;

	reg_addr = qat_hal_get_reg_addr(reg_type, reg_num);
	if (reg_addr == BAD_REGADDR) {
		pr_err("QAT: bad regaddr=0x%x\n", reg_addr);
		return EINVAL;
	}
	/* encode the register address into the probe instruction */
	switch (reg_type) {
	case ICP_GPA_REL:
		insts = 0xA070000000ull | (reg_addr & 0x3ff);
		break;
	default:
		insts = (uint64_t)0xA030000000ull | ((reg_addr & 0x3ff) << 10);
		break;
	}
	/* disable shared control store while patching ustore word 0 */
	qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
	scs_flag = csr_val & (1 << MMC_SHARE_CS_BITPOS);
	newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
	qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
	qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
	qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_cntl);
	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
	ctx_enables &= IGNORE_W1C_MASK;
	/* switch to the requested context if it is not already active */
	if (ctx != (savctx & ACS_ACNO))
		qat_hal_wr_ae_csr(handle,
				  ae,
				  ACTIVE_CTX_STATUS,
				  ctx & ACS_ACNO);
	/* save the microword that is about to be overwritten */
	qat_hal_get_uwords(handle, ae, 0, 1, &savuword);
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
	qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
	uaddr = UA_ECS;
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
	insts = qat_hal_set_uword_ecc(insts);
	uwrd_lo = (unsigned int)(insts & 0xffffffff);
	uwrd_hi = (unsigned int)(insts >> 0x20);
	qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
	qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
	/* delay for at least 8 cycles */
	qat_hal_wait_cycles(handle, ae, 0x8, 0);
	/*
	 * read ALU output
	 * the instruction should have been executed
	 * prior to clearing the ECS in putUwords
	 */
	qat_hal_rd_ae_csr(handle, ae, ALU_OUT, data);
	qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
	/* restore the saved microword and the previous context */
	qat_hal_wr_uwords(handle, ae, 0, 1, &savuword);
	if (ctx != (savctx & ACS_ACNO))
		qat_hal_wr_ae_csr(handle,
				  ae,
				  ACTIVE_CTX_STATUS,
				  savctx & ACS_ACNO);
	qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_cntl);
	qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
	newcsr_val = scs_flag ? SET_BIT(csr_val, MMC_SHARE_CS_BITPOS) :
				CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
	qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
	qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);

	return status;
}
1513
1514 static int
qat_hal_wr_rel_reg(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned char ctx,enum icp_qat_uof_regtype reg_type,unsigned short reg_num,unsigned int data)1515 qat_hal_wr_rel_reg(struct icp_qat_fw_loader_handle *handle,
1516 unsigned char ae,
1517 unsigned char ctx,
1518 enum icp_qat_uof_regtype reg_type,
1519 unsigned short reg_num,
1520 unsigned int data)
1521 {
1522 unsigned short src_hiaddr, src_lowaddr, dest_addr, data16hi, data16lo;
1523 uint64_t insts[] = { 0x0F440000000ull,
1524 0x0F040000000ull,
1525 0x0F0000C0300ull,
1526 0x0E000010000ull };
1527 const int num_inst = ARRAY_SIZE(insts), code_off = 1;
1528 const int imm_w1 = 0, imm_w0 = 1;
1529
1530 dest_addr = qat_hal_get_reg_addr(reg_type, reg_num);
1531 if (dest_addr == BAD_REGADDR) {
1532 pr_err("QAT: bad destAddr=0x%x\n", dest_addr);
1533 return EINVAL;
1534 }
1535
1536 data16lo = 0xffff & data;
1537 data16hi = 0xffff & (data >> 0x10);
1538 src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
1539 (unsigned short)(0xff & data16hi));
1540 src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
1541 (unsigned short)(0xff & data16lo));
1542 switch (reg_type) {
1543 case ICP_GPA_REL:
1544 insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
1545 ((src_hiaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
1546 insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
1547 ((src_lowaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
1548 break;
1549 default:
1550 insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
1551 ((dest_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
1552
1553 insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
1554 ((dest_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
1555 break;
1556 }
1557
1558 return qat_hal_exec_micro_inst(
1559 handle, ae, ctx, insts, num_inst, code_off, num_inst * 0x5, NULL);
1560 }
1561
1562 int
qat_hal_get_ins_num(void)1563 qat_hal_get_ins_num(void)
1564 {
1565 return ARRAY_SIZE(inst_4b);
1566 }
1567
1568 static int
qat_hal_concat_micro_code(uint64_t * micro_inst,unsigned int inst_num,unsigned int size,unsigned int addr,unsigned int * value)1569 qat_hal_concat_micro_code(uint64_t *micro_inst,
1570 unsigned int inst_num,
1571 unsigned int size,
1572 unsigned int addr,
1573 unsigned int *value)
1574 {
1575 int i;
1576 unsigned int cur_value;
1577 const uint64_t *inst_arr;
1578 unsigned int fixup_offset;
1579 int usize = 0;
1580 unsigned int orig_num;
1581 unsigned int delta;
1582
1583 orig_num = inst_num;
1584 fixup_offset = inst_num;
1585 cur_value = value[0];
1586 inst_arr = inst_4b;
1587 usize = ARRAY_SIZE(inst_4b);
1588 for (i = 0; i < usize; i++)
1589 micro_inst[inst_num++] = inst_arr[i];
1590 INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], (addr));
1591 fixup_offset++;
1592 INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], 0);
1593 fixup_offset++;
1594 INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0));
1595 fixup_offset++;
1596 INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0x10));
1597
1598 delta = inst_num - orig_num;
1599
1600 return (int)delta;
1601 }
1602
/*
 * Run a local-memory init microcode program on the given context. On
 * the first invocation (*pfirst_exec set) the GPRs the program clobbers
 * (a0-a2, b0-b1) are saved; they are written back after every run.
 *
 * Returns 0 on success or EFAULT if the microcode execution fails.
 *
 * NOTE(review): when *pfirst_exec is already clear, the write-back uses
 * this call's zero-initialized locals rather than values saved earlier;
 * the only caller in this file invokes this once per batch, so that
 * path appears unreachable in practice — confirm before reusing the
 * same pfirst_exec across batches.
 */
static int
qat_hal_exec_micro_init_lm(struct icp_qat_fw_loader_handle *handle,
			   unsigned char ae,
			   unsigned char ctx,
			   int *pfirst_exec,
			   uint64_t *micro_inst,
			   unsigned int inst_num)
{
	int stat = 0;
	unsigned int gpra0 = 0, gpra1 = 0, gpra2 = 0;
	unsigned int gprb0 = 0, gprb1 = 0;

	if (*pfirst_exec) {
		/* save the GPRs the microcode will clobber */
		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, &gpra0);
		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, &gpra1);
		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, &gpra2);
		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, &gprb0);
		qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, &gprb1);
		*pfirst_exec = 0;
	}
	stat = qat_hal_exec_micro_inst(
	    handle, ae, ctx, micro_inst, inst_num, 1, inst_num * 0x5, NULL);
	if (stat != 0)
		return EFAULT;
	/* restore the clobbered GPRs */
	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, gpra0);
	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, gpra1);
	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, gpra2);
	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, gprb0);
	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, gprb1);

	return 0;
}
1635
/*
 * Initialize AE local memory from a chain of batch-init descriptors:
 * one microcode stanza is concatenated per descriptor, a terminating
 * ctx_arb word is appended, and the whole program is executed once on
 * context 0.
 *
 * Returns 0 on success (or for an empty header) or the status of the
 * microcode execution.
 *
 * NOTE(review): the terminator written at micro_inst_arry[micro_inst_num++]
 * assumes lm_init_header->size already accounts for that extra word;
 * confirm against the producer of the batch-init chain.
 */
int
qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
		    unsigned char ae,
		    struct icp_qat_uof_batch_init *lm_init_header)
{
	struct icp_qat_uof_batch_init *plm_init;
	uint64_t *micro_inst_arry;
	int micro_inst_num;
	int alloc_inst_size;
	int first_exec = 1;
	int stat = 0;

	if (!lm_init_header)
		return 0;
	plm_init = lm_init_header->next;
	alloc_inst_size = lm_init_header->size;
	/* clamp the program to the AE's control-store capacity */
	if ((unsigned int)alloc_inst_size > handle->hal_handle->max_ustore)
		alloc_inst_size = handle->hal_handle->max_ustore;
	micro_inst_arry = malloc(alloc_inst_size * sizeof(uint64_t),
				 M_QAT,
				 M_WAITOK | M_ZERO);
	micro_inst_num = 0;
	/* build one stanza per descriptor in the chain */
	while (plm_init) {
		unsigned int addr, *value, size;

		ae = plm_init->ae;
		addr = plm_init->addr;
		value = plm_init->value;
		size = plm_init->size;
		micro_inst_num += qat_hal_concat_micro_code(
		    micro_inst_arry, micro_inst_num, size, addr, value);
		plm_init = plm_init->next;
	}
	/* exec micro codes */
	if (micro_inst_arry && micro_inst_num > 0) {
		/* append the terminating ctx_arb instruction */
		micro_inst_arry[micro_inst_num++] = 0x0E000010000ull;
		stat = qat_hal_exec_micro_init_lm(handle,
						  ae,
						  0,
						  &first_exec,
						  micro_inst_arry,
						  micro_inst_num);
	}
	free(micro_inst_arry, M_QAT);
	return stat;
}
1682
1683 static int
qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned char ctx,enum icp_qat_uof_regtype reg_type,unsigned short reg_num,unsigned int val)1684 qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle,
1685 unsigned char ae,
1686 unsigned char ctx,
1687 enum icp_qat_uof_regtype reg_type,
1688 unsigned short reg_num,
1689 unsigned int val)
1690 {
1691 int status = 0;
1692 unsigned int reg_addr;
1693 unsigned int ctx_enables;
1694 unsigned short mask;
1695 unsigned short dr_offset = 0x10;
1696
1697 status = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
1698 if (CE_INUSE_CONTEXTS & ctx_enables) {
1699 if (ctx & 0x1) {
1700 pr_err("QAT: bad 4-ctx mode,ctx=0x%x\n", ctx);
1701 return EINVAL;
1702 }
1703 mask = 0x1f;
1704 dr_offset = 0x20;
1705 } else {
1706 mask = 0x0f;
1707 }
1708 if (reg_num & ~mask)
1709 return EINVAL;
1710 reg_addr = reg_num + (ctx << 0x5);
1711 switch (reg_type) {
1712 case ICP_SR_RD_REL:
1713 case ICP_SR_REL:
1714 SET_AE_XFER(handle, ae, reg_addr, val);
1715 break;
1716 case ICP_DR_RD_REL:
1717 case ICP_DR_REL:
1718 SET_AE_XFER(handle, ae, (reg_addr + dr_offset), val);
1719 break;
1720 default:
1721 status = EINVAL;
1722 break;
1723 }
1724 return status;
1725 }
1726
/*
 * Write a context-relative write-transfer (or neighbour) register by
 * building a short immed/alu microprogram that moves the 32-bit value
 * through GPR b0 into the transfer register, then executing it with
 * qat_hal_exec_micro_inst(). b0 is saved and restored around the
 * operation.
 *
 * Returns 0 on success or EINVAL for a bad context/register address.
 */
static int
qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle,
			unsigned char ae,
			unsigned char ctx,
			enum icp_qat_uof_regtype reg_type,
			unsigned short reg_num,
			unsigned int data)
{
	unsigned int gprval, ctx_enables;
	unsigned short src_hiaddr, src_lowaddr, gpr_addr, xfr_addr, data16hi,
	    data16low;
	unsigned short reg_mask;
	int status = 0;
	uint64_t micro_inst[] = { 0x0F440000000ull,
				  0x0F040000000ull,
				  0x0A000000000ull,
				  0x0F0000C0300ull,
				  0x0E000010000ull };
	const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1;
	const unsigned short gprnum = 0, dly = num_inst * 0x5;

	qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
	if (CE_INUSE_CONTEXTS & ctx_enables) {
		/* 4-context mode: odd contexts do not exist */
		if (ctx & 0x1) {
			pr_err("QAT: 4-ctx mode,ctx=0x%x\n", ctx);
			return EINVAL;
		}
		reg_mask = (unsigned short)~0x1f;
	} else {
		reg_mask = (unsigned short)~0xf;
	}
	if (reg_num & reg_mask)
		return EINVAL;
	xfr_addr = qat_hal_get_reg_addr(reg_type, reg_num);
	if (xfr_addr == BAD_REGADDR) {
		pr_err("QAT: bad xfrAddr=0x%x\n", xfr_addr);
		return EINVAL;
	}
	/* save GPR b0, which the microprogram uses as a staging register */
	qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval);
	gpr_addr = qat_hal_get_reg_addr(ICP_GPB_REL, gprnum);
	data16low = 0xffff & data;
	data16hi = 0xffff & (data >> 0x10);
	src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
					  (unsigned short)(0xff & data16hi));
	src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
					   (unsigned short)(0xff & data16low));
	/* patch the two immed instructions and the alu move */
	micro_inst[0] = micro_inst[0x0] | ((data16hi >> 8) << 20) |
	    ((gpr_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
	micro_inst[1] = micro_inst[0x1] | ((data16low >> 8) << 20) |
	    ((gpr_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
	micro_inst[0x2] = micro_inst[0x2] | ((xfr_addr & 0x3ff) << 20) |
	    ((gpr_addr & 0x3ff) << 10);
	status = qat_hal_exec_micro_inst(
	    handle, ae, ctx, micro_inst, num_inst, code_off, dly, NULL);
	/* restore GPR b0 */
	qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, gprval);
	return status;
}
1784
1785 static int
qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned char ctx,unsigned short nn,unsigned int val)1786 qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle *handle,
1787 unsigned char ae,
1788 unsigned char ctx,
1789 unsigned short nn,
1790 unsigned int val)
1791 {
1792 unsigned int ctx_enables;
1793 int stat = 0;
1794
1795 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
1796 ctx_enables &= IGNORE_W1C_MASK;
1797 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE);
1798
1799 stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, ICP_NEIGH_REL, nn, val);
1800 qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
1801 return stat;
1802 }
1803
1804 static int
qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned short absreg_num,unsigned short * relreg,unsigned char * ctx)1805 qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle *handle,
1806 unsigned char ae,
1807 unsigned short absreg_num,
1808 unsigned short *relreg,
1809 unsigned char *ctx)
1810 {
1811 unsigned int ctx_enables;
1812
1813 qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
1814 if (ctx_enables & CE_INUSE_CONTEXTS) {
1815 /* 4-ctx mode */
1816 *relreg = absreg_num & 0x1F;
1817 *ctx = (absreg_num >> 0x4) & 0x6;
1818 } else {
1819 /* 8-ctx mode */
1820 *relreg = absreg_num & 0x0F;
1821 *ctx = (absreg_num >> 0x4) & 0x7;
1822 }
1823 return 0;
1824 }
1825
1826 int
qat_hal_init_gpr(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned long ctx_mask,enum icp_qat_uof_regtype reg_type,unsigned short reg_num,unsigned int regdata)1827 qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle,
1828 unsigned char ae,
1829 unsigned long ctx_mask,
1830 enum icp_qat_uof_regtype reg_type,
1831 unsigned short reg_num,
1832 unsigned int regdata)
1833 {
1834 int stat = 0;
1835 unsigned short reg;
1836 unsigned char ctx = 0;
1837 enum icp_qat_uof_regtype type;
1838
1839 if (reg_num >= ICP_QAT_UCLO_MAX_GPR_REG)
1840 return EINVAL;
1841
1842 do {
1843 if (ctx_mask == 0) {
1844 qat_hal_convert_abs_to_rel(
1845 handle, ae, reg_num, ®, &ctx);
1846 type = reg_type - 1;
1847 } else {
1848 reg = reg_num;
1849 type = reg_type;
1850 if (!test_bit(ctx, &ctx_mask))
1851 continue;
1852 }
1853 stat = qat_hal_wr_rel_reg(handle, ae, ctx, type, reg, regdata);
1854 if (stat) {
1855 pr_err("QAT: write gpr fail\n");
1856 return EINVAL;
1857 }
1858 } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
1859
1860 return 0;
1861 }
1862
1863 int
qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned long ctx_mask,enum icp_qat_uof_regtype reg_type,unsigned short reg_num,unsigned int regdata)1864 qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle,
1865 unsigned char ae,
1866 unsigned long ctx_mask,
1867 enum icp_qat_uof_regtype reg_type,
1868 unsigned short reg_num,
1869 unsigned int regdata)
1870 {
1871 int stat = 0;
1872 unsigned short reg;
1873 unsigned char ctx = 0;
1874 enum icp_qat_uof_regtype type;
1875
1876 if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
1877 return EINVAL;
1878
1879 do {
1880 if (ctx_mask == 0) {
1881 qat_hal_convert_abs_to_rel(
1882 handle, ae, reg_num, ®, &ctx);
1883 type = reg_type - 3;
1884 } else {
1885 reg = reg_num;
1886 type = reg_type;
1887 if (!test_bit(ctx, &ctx_mask))
1888 continue;
1889 }
1890 stat = qat_hal_put_rel_wr_xfer(
1891 handle, ae, ctx, type, reg, regdata);
1892 if (stat) {
1893 pr_err("QAT: write wr xfer fail\n");
1894 return EINVAL;
1895 }
1896 } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
1897
1898 return 0;
1899 }
1900
1901 int
qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned long ctx_mask,enum icp_qat_uof_regtype reg_type,unsigned short reg_num,unsigned int regdata)1902 qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
1903 unsigned char ae,
1904 unsigned long ctx_mask,
1905 enum icp_qat_uof_regtype reg_type,
1906 unsigned short reg_num,
1907 unsigned int regdata)
1908 {
1909 int stat = 0;
1910 unsigned short reg;
1911 unsigned char ctx = 0;
1912 enum icp_qat_uof_regtype type;
1913
1914 if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
1915 return EINVAL;
1916
1917 do {
1918 if (ctx_mask == 0) {
1919 qat_hal_convert_abs_to_rel(
1920 handle, ae, reg_num, ®, &ctx);
1921 type = reg_type - 3;
1922 } else {
1923 reg = reg_num;
1924 type = reg_type;
1925 if (!test_bit(ctx, &ctx_mask))
1926 continue;
1927 }
1928 stat = qat_hal_put_rel_rd_xfer(
1929 handle, ae, ctx, type, reg, regdata);
1930 if (stat) {
1931 pr_err("QAT: write rd xfer fail\n");
1932 return EINVAL;
1933 }
1934 } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
1935
1936 return 0;
1937 }
1938
1939 int
qat_hal_init_nn(struct icp_qat_fw_loader_handle * handle,unsigned char ae,unsigned long ctx_mask,unsigned short reg_num,unsigned int regdata)1940 qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
1941 unsigned char ae,
1942 unsigned long ctx_mask,
1943 unsigned short reg_num,
1944 unsigned int regdata)
1945 {
1946 int stat = 0;
1947 unsigned char ctx;
1948
1949 if (IS_QAT_GEN4(pci_get_device(GET_DEV(handle->accel_dev)))) {
1950 pr_err("QAT: No next neigh for CPM2X\n");
1951 return EINVAL;
1952 }
1953
1954 if (ctx_mask == 0)
1955 return EINVAL;
1956
1957 for_each_set_bit(ctx, &ctx_mask, ICP_QAT_UCLO_MAX_CTX)
1958 {
1959 stat = qat_hal_put_rel_nn(handle, ae, ctx, reg_num, regdata);
1960 if (stat) {
1961 pr_err("QAT: write neigh error\n");
1962 return EINVAL;
1963 }
1964 }
1965
1966 return 0;
1967 }
1968