/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
#include <linux/iopoll.h>
#include <adf_accel_devices.h>
#include <adf_cfg.h>
#include <adf_common_drv.h>
#include <adf_dev_err.h>
#include <adf_pfvf_msg.h>
#include <adf_gen4_hw_data.h>
#include <adf_gen4_pfvf.h>
#include <adf_gen4_timer.h>
#include "adf_4xxx_hw_data.h"
#include "adf_heartbeat.h"
#include "icp_qat_fw_init_admin.h"
#include "icp_qat_hw.h"

#define ADF_CONST_TABLE_SIZE 1024

struct adf_fw_config {
	u32 ae_mask;
	char *obj_name;
};

/* Accel unit information */
static const struct adf_accel_unit adf_4xxx_au_a_ae[] = {
	{ 0x1, 0x1, 0xF, 0x1B, 4, ADF_ACCEL_SERVICE_NULL },
	{ 0x2, 0x1, 0xF0, 0x6C0, 4, ADF_ACCEL_SERVICE_NULL },
	{ 0x4, 0x1, 0x100, 0xF000, 1, ADF_ACCEL_ADMIN },
};
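/*
 * The table above is copied into accel_dev->au_info->au at init time;
 * entries left at ADF_ACCEL_SERVICE_NULL are claimed later by the
 * services enabled in the configuration (see
 * adf_init_accel_unit_services() below).
 */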

/* Worker thread to service arbiter mappings */
static u32 thrd_to_arb_map[ADF_4XXX_MAX_ACCELENGINES] = { 0x5555555, 0x5555555,
							  0x5555555, 0x5555555,
							  0xAAAAAAA, 0xAAAAAAA,
							  0xAAAAAAA, 0xAAAAAAA,
							  0x0 };
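/*
 * Each entry packs seven per-thread service masks, ADF_CFG_MAX_SERVICES
 * bits per thread (see adf_4xxx_cfg_gen_dispatch_arbiter()): 0x5555555
 * enables ring pairs 0 and 2 (asym in the default map) on threads 0-6,
 * 0xAAAAAAA enables ring pairs 1 and 3 (sym), and the admin AE gets no
 * arbiter slots.
 */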

/* Masks representing ME thread-service mappings.
 * Thread 7 carries out admin work and is therefore left out:
 * the default mask enables threads 0-6, while compression MEs
 * use only threads 0-1.
 */
static u8 default_active_thd_mask = 0x7F;
static u8 dc_me_active_thd_mask = 0x03;

static u32 thrd_to_arb_map_gen[ADF_4XXX_MAX_ACCELENGINES] = { 0 };

#define ADF_4XXX_ASYM_SYM \
	(ASYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
	 ASYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
	 SYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

#define ADF_4XXX_DC \
	(COMP | COMP << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
	 COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
	 COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

#define ADF_4XXX_SYM \
	(SYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
	 SYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
	 SYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

#define ADF_4XXX_ASYM \
	(ASYM | ASYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
	 ASYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
	 ASYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

#define ADF_4XXX_ASYM_DC \
	(ASYM | ASYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
	 COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
	 COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

#define ADF_4XXX_SYM_DC \
	(SYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
	 COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
	 COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

#define ADF_4XXX_NA \
	(NA | NA << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
	 NA << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
	 NA << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

#define ADF_4XXX_DEFAULT_RING_TO_SRV_MAP ADF_4XXX_ASYM_SYM
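/*
 * A ring_to_svc_map value packs one service type per ring pair at the
 * ADF_CFG_SERV_RING_PAIR_n_SHIFT offsets; the default map above puts
 * asym on pairs 0/2 and sym on pairs 1/3 (ADF_4XXX_ASYM_SYM).
 */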

struct adf_enabled_services {
	const char svcs_enabled[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	u16 rng_to_svc_msk;
};

static struct adf_enabled_services adf_4xxx_svcs[] = {
	{ "dc", ADF_4XXX_DC },
	{ "sym", ADF_4XXX_SYM },
	{ "asym", ADF_4XXX_ASYM },
	{ "dc;asym", ADF_4XXX_ASYM_DC },
	{ "asym;dc", ADF_4XXX_ASYM_DC },
	{ "sym;dc", ADF_4XXX_SYM_DC },
	{ "dc;sym", ADF_4XXX_SYM_DC },
	{ "asym;sym", ADF_4XXX_ASYM_SYM },
	{ "sym;asym", ADF_4XXX_ASYM_SYM },
	{ "cy", ADF_4XXX_ASYM_SYM }
};
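/*
 * The table is keyed by the ADF_SERVICES_ENABLED config string; e.g. a
 * configured value of "dc;sym" (or "sym;dc") selects ADF_4XXX_SYM_DC.
 */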

static struct adf_hw_device_class adf_4xxx_class = {
	.name = ADF_4XXX_DEVICE_NAME,
	.type = DEV_4XXX,
	.instances = 0,
};

static u32
get_accel_mask(struct adf_accel_dev *accel_dev)
{
	return ADF_4XXX_ACCELERATORS_MASK;
}

static u32
get_ae_mask(struct adf_accel_dev *accel_dev)
{
	u32 fusectl4 = accel_dev->hw_device->fuses;

	return ~fusectl4 & ADF_4XXX_ACCELENGINES_MASK;
}

static void
adf_set_asym_rings_mask(struct adf_accel_dev *accel_dev)
{
	accel_dev->hw_device->asym_rings_mask = ADF_4XXX_DEF_ASYM_MASK;
}

static int
get_ring_to_svc_map(struct adf_accel_dev *accel_dev, u16 *ring_to_svc_map)
{
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	u32 i = 0;

	*ring_to_svc_map = 0;
	/* Get the services enabled by user */
	snprintf(key, sizeof(key), ADF_SERVICES_ENABLED);
	if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val))
		return EFAULT;

	for (i = 0; i < ARRAY_SIZE(adf_4xxx_svcs); i++) {
		if (!strncmp(val,
			     adf_4xxx_svcs[i].svcs_enabled,
			     ADF_CFG_MAX_VAL_LEN_IN_BYTES)) {
			*ring_to_svc_map = adf_4xxx_svcs[i].rng_to_svc_msk;
			return 0;
		}
	}

	device_printf(GET_DEV(accel_dev),
		      "Invalid services enabled: %s\n",
		      val);
	return EFAULT;
}

static u32
get_num_accels(struct adf_hw_device_data *self)
{
	return ADF_4XXX_MAX_ACCELERATORS;
}

static u32
get_num_aes(struct adf_hw_device_data *self)
{
	if (!self || !self->ae_mask)
		return 0;

	return hweight32(self->ae_mask);
}

static u32
get_misc_bar_id(struct adf_hw_device_data *self)
{
	return ADF_4XXX_PMISC_BAR;
}

static u32
get_etr_bar_id(struct adf_hw_device_data *self)
{
	return ADF_4XXX_ETR_BAR;
}

static u32
get_sram_bar_id(struct adf_hw_device_data *self)
{
	return ADF_4XXX_SRAM_BAR;
}

/*
 * The vector routing table is used to select the MSI-X entry to use for each
 * interrupt source.
 * The first ADF_4XXX_ETR_MAX_BANKS entries correspond to ring interrupts.
 * The final entry corresponds to VF2PF or error interrupts.
 * This vector table could be used to configure one MSI-X entry to be shared
 * between multiple interrupt sources.
 *
 * The default routing is set to have a one to one correspondence between the
 * interrupt source and the MSI-X entry used.
 */
static void
set_msix_default_rttable(struct adf_accel_dev *accel_dev)
{
	struct resource *csr;
	int i;

	csr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
	for (i = 0; i <= ADF_4XXX_ETR_MAX_BANKS; i++)
		ADF_CSR_WR(csr, ADF_4XXX_MSIX_RTTABLE_OFFSET(i), i);
}

static u32
adf_4xxx_get_hw_cap(struct adf_accel_dev *accel_dev)
{
	device_t pdev = accel_dev->accel_pci_dev.pci_dev;
	u32 fusectl1;
	u32 capabilities;

	/* Read accelerator capabilities mask */
	fusectl1 = pci_read_config(pdev, ADF_4XXX_FUSECTL1_OFFSET, 4);
	capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
	    ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
	    ICP_ACCEL_CAPABILITIES_CIPHER |
	    ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
	    ICP_ACCEL_CAPABILITIES_COMPRESSION |
	    ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
	    ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
	    ICP_ACCEL_CAPABILITIES_SHA3 | ICP_ACCEL_CAPABILITIES_HKDF |
	    ICP_ACCEL_CAPABILITIES_SHA3_EXT | ICP_ACCEL_CAPABILITIES_SM3 |
	    ICP_ACCEL_CAPABILITIES_SM4 | ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
	    ICP_ACCEL_CAPABILITIES_AESGCM_SPC | ICP_ACCEL_CAPABILITIES_AES_V2 |
	    ICP_ACCEL_CAPABILITIES_RL | ICP_ACCEL_CAPABILITIES_ECEDMONT |
	    ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;

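	/* Each fuse bit that is set disables the corresponding slice, so
	 * strip the capabilities that slice would have provided.
	 */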
	if (fusectl1 & ICP_ACCEL_4XXX_MASK_CIPHER_SLICE) {
		capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
		capabilities &= ~ICP_ACCEL_CAPABILITIES_HKDF;
		capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
	}
	if (fusectl1 & ICP_ACCEL_4XXX_MASK_AUTH_SLICE) {
		capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
		capabilities &= ~ICP_ACCEL_CAPABILITIES_SHA3;
		capabilities &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
		capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
	}
	if (fusectl1 & ICP_ACCEL_MASK_PKE_SLICE) {
		capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
		capabilities &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
	}
	if (fusectl1 & ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE) {
		capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
		capabilities &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION;
		capabilities &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION;
		capabilities &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
	}
	if (fusectl1 & ICP_ACCEL_4XXX_MASK_SMX_SLICE) {
		capabilities &= ~ICP_ACCEL_CAPABILITIES_SM3;
		capabilities &= ~ICP_ACCEL_CAPABILITIES_SM4;
	}
	if (fusectl1 & ICP_ACCEL_4XXX_MASK_UCS_SLICE) {
		capabilities &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
		capabilities &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
		capabilities &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
		capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
	}

	return capabilities;
}

static u32
get_hb_clock(struct adf_hw_device_data *self)
{
	/*
	 * 4XXX uses the KPT counter for heartbeat
	 */
	return ADF_4XXX_KPT_COUNTER_FREQ;
}

static u32
get_ae_clock(struct adf_hw_device_data *self)
{
	/*
	 * Clock update interval is 16 ticks for qat_4xxx.
	 */
	return self->clock_frequency / 16;
}

static int
measure_clock(struct adf_accel_dev *accel_dev)
{
	u32 frequency;
	int ret = 0;

	ret = adf_dev_measure_clock(accel_dev,
				    &frequency,
				    ADF_4XXX_MIN_AE_FREQ,
				    ADF_4XXX_MAX_AE_FREQ);
	if (ret)
		return ret;

	accel_dev->hw_device->clock_frequency = frequency;
	return 0;
}

static int
adf_4xxx_configure_accel_units(struct adf_accel_dev *accel_dev)
{
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES] = { 0 };
	char val_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 };

	if (adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC))
		goto err;

	snprintf(key, sizeof(key), ADF_SERVICES_ENABLED);
	snprintf(val_str,
		 sizeof(val_str),
		 ADF_CFG_ASYM ADF_SERVICES_SEPARATOR ADF_CFG_SYM);

	if (adf_cfg_add_key_value_param(
		accel_dev, ADF_GENERAL_SEC, key, (void *)val_str, ADF_STR))
		goto err;

	return 0;
err:
	device_printf(GET_DEV(accel_dev), "Failed to configure accel units\n");
	return EINVAL;
}

static u32
get_num_accel_units(struct adf_hw_device_data *self)
{
	return ADF_4XXX_MAX_ACCELUNITS;
}

static void
get_accel_unit(struct adf_hw_device_data *self,
	       struct adf_accel_unit **accel_unit)
{
	memcpy(*accel_unit, adf_4xxx_au_a_ae, sizeof(adf_4xxx_au_a_ae));
}

static void
adf_exit_accel_unit_services(struct adf_accel_dev *accel_dev)
{
	if (accel_dev->au_info) {
		kfree(accel_dev->au_info->au);
		accel_dev->au_info->au = NULL;
		kfree(accel_dev->au_info);
		accel_dev->au_info = NULL;
	}
}

static int
get_accel_unit_config(struct adf_accel_dev *accel_dev,
		      u8 *num_sym_au,
		      u8 *num_dc_au,
		      u8 *num_asym_au)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	u32 num_au = hw_data->get_num_accel_units(hw_data);
	/* One AU is allocated by default if a service is enabled */
	u32 alloc_au = 1;
	/* There is always one AU reserved for the Admin AE */
	u32 service_mask = ADF_ACCEL_ADMIN;
	char *token, *cur_str;
	u32 disabled_caps = 0;

	/* Get the services enabled by user */
	snprintf(key, sizeof(key), ADF_SERVICES_ENABLED);
	if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val))
		return EFAULT;
	cur_str = val;
	token = strsep(&cur_str, ADF_SERVICES_SEPARATOR);
	while (token) {
		if (!strncmp(token, ADF_CFG_SYM, strlen(ADF_CFG_SYM)))
			service_mask |= ADF_ACCEL_CRYPTO;
		if (!strncmp(token, ADF_CFG_ASYM, strlen(ADF_CFG_ASYM)))
			service_mask |= ADF_ACCEL_ASYM;

		/* "cy" means both asym and sym should be enabled;
		 * the hardware resource allocation is checked later
		 */
		if (!strncmp(token, ADF_CFG_CY, strlen(ADF_CFG_CY)))
			service_mask |= ADF_ACCEL_ASYM | ADF_ACCEL_CRYPTO;
		if (!strncmp(token, ADF_SERVICE_DC, strlen(ADF_SERVICE_DC)))
			service_mask |= ADF_ACCEL_COMPRESSION;

		token = strsep(&cur_str, ADF_SERVICES_SEPARATOR);
	}

	/* Ensure the user won't enable more services than the device
	 * can support
	 */
	if (hweight32(service_mask) > num_au) {
		device_printf(GET_DEV(accel_dev),
			      "Can't enable more services than %d!\n",
			      num_au);
		return EFAULT;
	} else if (hweight32(service_mask) == 2) {
		/* Due to a limitation, only 2 more AUs can be allocated
		 * besides the AU for the Admin AE
		 */
		alloc_au = 2;
	}

	if (service_mask & ADF_ACCEL_CRYPTO)
		*num_sym_au = alloc_au;
	if (service_mask & ADF_ACCEL_ASYM)
		*num_asym_au = alloc_au;
	if (service_mask & ADF_ACCEL_COMPRESSION)
		*num_dc_au = alloc_au;

	/* Update capabilities to match the enabled services */
	if (!*num_sym_au || !(service_mask & ADF_ACCEL_CRYPTO)) {
		disabled_caps = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
		    ICP_ACCEL_CAPABILITIES_CIPHER |
		    ICP_ACCEL_CAPABILITIES_SHA3 |
		    ICP_ACCEL_CAPABILITIES_SHA3_EXT |
		    ICP_ACCEL_CAPABILITIES_HKDF | ICP_ACCEL_CAPABILITIES_SM3 |
		    ICP_ACCEL_CAPABILITIES_SM4 |
		    ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
		    ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
		    ICP_ACCEL_CAPABILITIES_AES_V2 |
		    ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
	}
	if (!*num_asym_au || !(service_mask & ADF_ACCEL_ASYM)) {
		disabled_caps |= ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
		    ICP_ACCEL_CAPABILITIES_ECEDMONT;
	}
	if (!*num_dc_au || !(service_mask & ADF_ACCEL_COMPRESSION)) {
		disabled_caps |= ICP_ACCEL_CAPABILITIES_COMPRESSION |
		    ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
		    ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
		    ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
		accel_dev->hw_device->extended_dc_capabilities = 0;
	}
	accel_dev->hw_device->accel_capabilities_mask =
	    adf_4xxx_get_hw_cap(accel_dev) & ~disabled_caps;

	hw_data->service_mask = service_mask;
	hw_data->service_to_load_mask = service_mask;

	return 0;
}

static int
adf_init_accel_unit_services(struct adf_accel_dev *accel_dev)
{
	u8 num_sym_au = 0, num_dc_au = 0, num_asym_au = 0;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 num_au = hw_data->get_num_accel_units(hw_data);
	u32 au_size = num_au * sizeof(struct adf_accel_unit);
	u8 i;

	if (get_accel_unit_config(
		accel_dev, &num_sym_au, &num_dc_au, &num_asym_au))
		return EFAULT;

	accel_dev->au_info = kzalloc(sizeof(*accel_dev->au_info), GFP_KERNEL);
	if (!accel_dev->au_info)
		return ENOMEM;

	accel_dev->au_info->au = kzalloc(au_size, GFP_KERNEL);
	if (!accel_dev->au_info->au) {
		kfree(accel_dev->au_info);
		accel_dev->au_info = NULL;
		return ENOMEM;
	}

	accel_dev->au_info->num_cy_au = num_sym_au;
	accel_dev->au_info->num_dc_au = num_dc_au;
	accel_dev->au_info->num_asym_au = num_asym_au;

	get_accel_unit(hw_data, &accel_dev->au_info->au);

	/* Enable ASYM accel units */
	for (i = 0; i < num_au && num_asym_au > 0; i++) {
		if (accel_dev->au_info->au[i].services ==
		    ADF_ACCEL_SERVICE_NULL) {
			accel_dev->au_info->au[i].services = ADF_ACCEL_ASYM;
			num_asym_au--;
		}
	}
	/* Enable SYM accel units */
	for (i = 0; i < num_au && num_sym_au > 0; i++) {
		if (accel_dev->au_info->au[i].services ==
		    ADF_ACCEL_SERVICE_NULL) {
			accel_dev->au_info->au[i].services = ADF_ACCEL_CRYPTO;
			num_sym_au--;
		}
	}
	/* Enable compression accel units */
	for (i = 0; i < num_au && num_dc_au > 0; i++) {
		if (accel_dev->au_info->au[i].services ==
		    ADF_ACCEL_SERVICE_NULL) {
			accel_dev->au_info->au[i].services =
			    ADF_ACCEL_COMPRESSION;
			num_dc_au--;
		}
	}
	accel_dev->au_info->dc_ae_msk |=
	    hw_data->get_obj_cfg_ae_mask(accel_dev, ADF_ACCEL_COMPRESSION);

	return 0;
}

static int
adf_init_accel_units(struct adf_accel_dev *accel_dev)
{
	return adf_init_accel_unit_services(accel_dev);
}

static void
adf_exit_accel_units(struct adf_accel_dev *accel_dev)
{
	/* Reset the AU services */
	adf_exit_accel_unit_services(accel_dev);
}

static const char *
get_obj_name(struct adf_accel_dev *accel_dev,
	     enum adf_accel_unit_services service)
{
	switch (service) {
	case ADF_ACCEL_ASYM:
		return ADF_4XXX_ASYM_OBJ;
	case ADF_ACCEL_CRYPTO:
		return ADF_4XXX_SYM_OBJ;
	case ADF_ACCEL_COMPRESSION:
		return ADF_4XXX_DC_OBJ;
	case ADF_ACCEL_ADMIN:
		return ADF_4XXX_ADMIN_OBJ;
	default:
		return NULL;
	}
}

static uint32_t
get_objs_num(struct adf_accel_dev *accel_dev)
{
	return ADF_4XXX_MAX_OBJ;
}

static uint32_t
get_obj_cfg_ae_mask(struct adf_accel_dev *accel_dev,
		    enum adf_accel_unit_services service)
{
	u32 ae_mask = 0;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 num_au = hw_data->get_num_accel_units(hw_data);
	struct adf_accel_unit *accel_unit = accel_dev->au_info->au;
	u32 i = 0;

	if (service == ADF_ACCEL_SERVICE_NULL)
		return 0;

	for (i = 0; i < num_au; i++) {
		if (accel_unit[i].services == service)
			ae_mask |= accel_unit[i].ae_mask;
	}

	return ae_mask;
}

static enum adf_accel_unit_services
adf_4xxx_get_service_type(struct adf_accel_dev *accel_dev, s32 obj_num)
{
	struct adf_accel_unit *accel_unit;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u8 num_au = hw_data->get_num_accel_units(hw_data);
	int i;

	if (!hw_data->service_to_load_mask)
		return ADF_ACCEL_SERVICE_NULL;

	if (accel_dev->au_info && accel_dev->au_info->au)
		accel_unit = accel_dev->au_info->au;
	else
		return ADF_ACCEL_SERVICE_NULL;

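	/* Walk the service AUs from the highest index down, skipping the
	 * admin AU in the last slot, which is handled separately below
	 */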
	for (i = num_au - 2; i >= 0; i--) {
		if (hw_data->service_to_load_mask & accel_unit[i].services) {
			hw_data->service_to_load_mask &=
			    ~accel_unit[i].services;
			return accel_unit[i].services;
		}
	}

	/* The admin AE should be loaded last */
	if (hw_data->service_to_load_mask & accel_unit[num_au - 1].services) {
		hw_data->service_to_load_mask &=
		    ~accel_unit[num_au - 1].services;
		return accel_unit[num_au - 1].services;
	}

	return ADF_ACCEL_SERVICE_NULL;
}

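/*
 * On gen4 each bank exposes ADF_4XXX_NUM_RINGS_PER_BANK rings forming a
 * single tx/rx ring pair, so every bank serves exactly one service type
 * with NUM_RINGS_PER_BANK / 2 rings in each direction.
 */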
static void
get_ring_svc_map_data(int ring_pair_index,
		      u16 ring_to_svc_map,
		      u8 *serv_type,
		      int *ring_index,
		      int *num_rings_per_srv,
		      int bundle_num)
{
	*serv_type =
	    GET_SRV_TYPE(ring_to_svc_map, bundle_num % ADF_CFG_NUM_SERVICES);
	*ring_index = 0;
	*num_rings_per_srv = ADF_4XXX_NUM_RINGS_PER_BANK / 2;
}

static int
adf_get_dc_extcapabilities(struct adf_accel_dev *accel_dev, u32 *capabilities)
{
	struct icp_qat_fw_init_admin_req req;
	struct icp_qat_fw_init_admin_resp resp;
	u8 i;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u8 num_au = hw_data->get_num_accel_units(hw_data);
	u32 first_dc_ae = 0;

	for (i = 0; i < num_au; i++) {
		if (accel_dev->au_info->au[i].services &
		    ADF_ACCEL_COMPRESSION) {
			first_dc_ae = accel_dev->au_info->au[i].ae_mask;
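			/* Keep only the lowest set bit: the first AE
			 * of the compression accel unit
			 */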
			first_dc_ae &= ~(first_dc_ae - 1);
		}
	}

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));
	req.cmd_id = ICP_QAT_FW_COMP_CAPABILITY_GET;

	if (likely(first_dc_ae)) {
		if (adf_send_admin(accel_dev, &req, &resp, first_dc_ae) ||
		    resp.status) {
			*capabilities = 0;
			return EFAULT;
		}

		*capabilities = resp.extended_features;
	}

	return 0;
}

static int
adf_get_fw_status(struct adf_accel_dev *accel_dev,
		  u8 *major,
		  u8 *minor,
		  u8 *patch)
{
	struct icp_qat_fw_init_admin_req req;
	struct icp_qat_fw_init_admin_resp resp;
	u32 ae_mask = 1;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));
	req.cmd_id = ICP_QAT_FW_STATUS_GET;

	if (adf_send_admin(accel_dev, &req, &resp, ae_mask))
		return EFAULT;

	*major = resp.version_major_num;
	*minor = resp.version_minor_num;
	*patch = resp.version_patch_num;

	return 0;
}

static int
adf_4xxx_send_admin_init(struct adf_accel_dev *accel_dev)
{
	int ret = 0;
	struct icp_qat_fw_init_admin_req req;
	struct icp_qat_fw_init_admin_resp resp;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 ae_mask = hw_data->ae_mask;
	u32 admin_ae_mask = hw_data->admin_ae_mask;
	u8 num_au = hw_data->get_num_accel_units(hw_data);
	u8 i;
	u32 dc_capabilities = 0;

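	/* Trim the init mask to AEs that belong to an enabled service and
	 * the admin mask to AEs in the admin accel unit
	 */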
	for (i = 0; i < num_au; i++) {
		if (accel_dev->au_info->au[i].services ==
		    ADF_ACCEL_SERVICE_NULL)
			ae_mask &= ~accel_dev->au_info->au[i].ae_mask;

		if (accel_dev->au_info->au[i].services != ADF_ACCEL_ADMIN)
			admin_ae_mask &= ~accel_dev->au_info->au[i].ae_mask;
	}

	if (!accel_dev->admin) {
		device_printf(GET_DEV(accel_dev), "adf_admin not available\n");
		return EFAULT;
	}

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));

	req.cmd_id = ICP_QAT_FW_CONSTANTS_CFG;
	req.init_cfg_sz = ADF_CONST_TABLE_SIZE;
	req.init_cfg_ptr = accel_dev->admin->const_tbl_addr;
	if (adf_send_admin(accel_dev, &req, &resp, admin_ae_mask)) {
		device_printf(GET_DEV(accel_dev),
			      "Error sending constants config message\n");
		return EFAULT;
	}

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));
	req.cmd_id = ICP_QAT_FW_INIT_ME;
	if (adf_send_admin(accel_dev, &req, &resp, ae_mask)) {
		device_printf(GET_DEV(accel_dev),
			      "Error sending init message\n");
		return EFAULT;
	}

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));
	req.cmd_id = ICP_QAT_FW_HEARTBEAT_TIMER_SET;
	req.init_cfg_ptr = accel_dev->admin->phy_hb_addr;
	if (adf_get_hb_timer(accel_dev, &req.heartbeat_ticks))
		return EINVAL;

	if (adf_send_admin(accel_dev, &req, &resp, ae_mask))
		device_printf(GET_DEV(accel_dev),
			      "Heartbeat is not supported\n");

	ret = adf_get_dc_extcapabilities(accel_dev, &dc_capabilities);
	if (unlikely(ret)) {
		device_printf(GET_DEV(accel_dev),
			      "Could not get FW ext. capabilities\n");
	}

	accel_dev->hw_device->extended_dc_capabilities = dc_capabilities;

	adf_get_fw_status(accel_dev,
			  &accel_dev->fw_versions.fw_version_major,
			  &accel_dev->fw_versions.fw_version_minor,
			  &accel_dev->fw_versions.fw_version_patch);

	device_printf(GET_DEV(accel_dev),
		      "FW version: %d.%d.%d\n",
		      accel_dev->fw_versions.fw_version_major,
		      accel_dev->fw_versions.fw_version_minor,
		      accel_dev->fw_versions.fw_version_patch);

	return ret;
}

static enum dev_sku_info
get_sku(struct adf_hw_device_data *self)
{
	return DEV_SKU_1;
}

static struct adf_accel_unit *
get_au_by_ae(struct adf_accel_dev *accel_dev, int ae_num)
{
	int i = 0;
	struct adf_accel_unit *accel_unit = accel_dev->au_info->au;

	if (!accel_unit)
		return NULL;

	for (i = 0; i < ADF_4XXX_MAX_ACCELUNITS; i++)
		if (accel_unit[i].ae_mask & BIT(ae_num))
			return &accel_unit[i];

	return NULL;
}

static bool
check_accel_unit_service(enum adf_accel_unit_services au_srv,
			 enum adf_cfg_service_type ring_srv)
{
	if ((ADF_ACCEL_SERVICE_NULL == au_srv) && ring_srv == NA)
		return true;
	if ((au_srv & ADF_ACCEL_COMPRESSION) && ring_srv == COMP)
		return true;
	if ((au_srv & ADF_ACCEL_ASYM) && ring_srv == ASYM)
		return true;
	if ((au_srv & ADF_ACCEL_CRYPTO) && ring_srv == SYM)
		return true;

	return false;
}

static void
adf_4xxx_cfg_gen_dispatch_arbiter(struct adf_accel_dev *accel_dev,
				  u32 *thrd_to_arb_map_gen)
{
	struct adf_accel_unit *au = NULL;
	int engine = 0;
	int thread = 0;
	int service;
	u16 ena_srv_mask;
	u16 service_type;
	u32 service_mask;
	unsigned long thd_srv_mask = default_active_thd_mask;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;

	ena_srv_mask = accel_dev->hw_device->ring_to_svc_map;
	/* If ring_to_svc_map is not changed, return default arbiter value */
	if (ena_srv_mask == ADF_4XXX_DEFAULT_RING_TO_SRV_MAP) {
		memcpy(thrd_to_arb_map_gen,
		       thrd_to_arb_map,
		       sizeof(thrd_to_arb_map_gen[0]) *
			   ADF_4XXX_MAX_ACCELENGINES);
		return;
	}

	for (engine = 0; engine < ADF_4XXX_MAX_ACCELENGINES - 1; engine++) {
		thrd_to_arb_map_gen[engine] = 0;
		service_mask = 0;
		au = get_au_by_ae(accel_dev, engine);
		if (!au)
			continue;

		for (service = 0; service < ADF_CFG_MAX_SERVICES; service++) {
			service_type = GET_SRV_TYPE(ena_srv_mask, service);
			if (check_accel_unit_service(au->services,
						     service_type))
				service_mask |= BIT(service);
		}

		if (au->services == ADF_ACCEL_COMPRESSION)
			thd_srv_mask = dc_me_active_thd_mask;
		else if (au->services == ADF_ACCEL_ASYM)
			thd_srv_mask = hw_data->asym_ae_active_thd_mask;
		else
			thd_srv_mask = default_active_thd_mask;

		for_each_set_bit(thread, &thd_srv_mask, 8)
		{
			thrd_to_arb_map_gen[engine] |=
			    (service_mask << (ADF_CFG_MAX_SERVICES * thread));
		}
	}
}

static void
adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
			u32 const **arb_map_config)
{
	int i;
	struct adf_hw_device_data *hw_device = accel_dev->hw_device;

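	/* Clear the arbiter entry of any AE missing from ae_mask */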
	for (i = 1; i < ADF_4XXX_MAX_ACCELENGINES; i++) {
		if (~hw_device->ae_mask & (1 << i))
			thrd_to_arb_map[i] = 0;
	}
	adf_4xxx_cfg_gen_dispatch_arbiter(accel_dev, thrd_to_arb_map_gen);
	*arb_map_config = thrd_to_arb_map_gen;
}

static void
get_arb_info(struct arb_info *arb_info)
{
	arb_info->wrk_cfg_offset = ADF_4XXX_ARB_CONFIG;
	arb_info->arbiter_offset = ADF_4XXX_ARB_OFFSET;
	arb_info->wrk_thd_2_srv_arb_map = ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET;
}

static void
get_admin_info(struct admin_info *admin_csrs_info)
{
	admin_csrs_info->mailbox_offset = ADF_4XXX_MAILBOX_BASE_OFFSET;
	admin_csrs_info->admin_msg_ur = ADF_4XXX_ADMINMSGUR_OFFSET;
	admin_csrs_info->admin_msg_lr = ADF_4XXX_ADMINMSGLR_OFFSET;
}

static void
adf_enable_error_correction(struct adf_accel_dev *accel_dev)
{
	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR];
	struct resource *csr = misc_bar->virt_addr;

	/* Enable all in errsou3 except VFLR notification on host */
	ADF_CSR_WR(csr, ADF_4XXX_ERRMSK3, ADF_4XXX_VFLNOTIFY);
}

static void
adf_enable_ints(struct adf_accel_dev *accel_dev)
{
	struct resource *addr;

	addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;

	/* Enable bundle interrupts */
	ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET, 0);
	ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET, 0);

	/* Enable misc interrupts */
	ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_MASK_OFFSET, 0);
}

static int
adf_init_device(struct adf_accel_dev *accel_dev)
{
	struct resource *addr;
	u32 status;
	u32 csr;
	int ret;

	addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;

	/* Temporarily mask PM interrupt */
	csr = ADF_CSR_RD(addr, ADF_4XXX_ERRMSK2);
	csr |= ADF_4XXX_PM_SOU;
	ADF_CSR_WR(addr, ADF_4XXX_ERRMSK2, csr);

	/* Set DRV_ACTIVE bit to power up the device */
	ADF_CSR_WR(addr, ADF_4XXX_PM_INTERRUPT, ADF_4XXX_PM_DRV_ACTIVE);

	/* Poll status register to make sure the device is powered up */
	status = 0;
	ret = read_poll_timeout(ADF_CSR_RD,
				status,
				status & ADF_4XXX_PM_INIT_STATE,
				ADF_4XXX_PM_POLL_DELAY_US,
				ADF_4XXX_PM_POLL_TIMEOUT_US,
				true,
				addr,
				ADF_4XXX_PM_STATUS);
	if (ret)
		device_printf(GET_DEV(accel_dev),
			      "Failed to power up the device\n");

	return ret;
}

void
adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 id)
{
	hw_data->dev_class = &adf_4xxx_class;
	hw_data->instance_id = adf_4xxx_class.instances++;
	hw_data->num_banks = ADF_4XXX_ETR_MAX_BANKS;
	hw_data->num_rings_per_bank = ADF_4XXX_NUM_RINGS_PER_BANK;
	hw_data->num_accel = ADF_4XXX_MAX_ACCELERATORS;
	hw_data->num_engines = ADF_4XXX_MAX_ACCELENGINES;
	hw_data->num_logical_accel = 1;
	hw_data->tx_rx_gap = ADF_4XXX_RX_RINGS_OFFSET;
	hw_data->tx_rings_mask = ADF_4XXX_TX_RINGS_MASK;
	hw_data->alloc_irq = adf_isr_resource_alloc;
	hw_data->free_irq = adf_isr_resource_free;
	hw_data->enable_error_correction = adf_enable_error_correction;
	hw_data->get_accel_mask = get_accel_mask;
	hw_data->get_ae_mask = get_ae_mask;
	hw_data->get_num_accels = get_num_accels;
	hw_data->get_num_aes = get_num_aes;
	hw_data->get_sram_bar_id = get_sram_bar_id;
	hw_data->get_etr_bar_id = get_etr_bar_id;
	hw_data->get_misc_bar_id = get_misc_bar_id;
	hw_data->get_arb_info = get_arb_info;
	hw_data->get_admin_info = get_admin_info;
	hw_data->get_accel_cap = adf_4xxx_get_hw_cap;
	hw_data->clock_frequency = ADF_4XXX_AE_FREQ;
	hw_data->get_sku = get_sku;
	hw_data->heartbeat_ctr_num = ADF_NUM_HB_CNT_PER_AE;
	hw_data->fw_name = ADF_4XXX_FW;
	hw_data->fw_mmp_name = ADF_4XXX_MMP;
	hw_data->init_admin_comms = adf_init_admin_comms;
	hw_data->exit_admin_comms = adf_exit_admin_comms;
	hw_data->send_admin_init = adf_4xxx_send_admin_init;
	hw_data->init_arb = adf_init_gen2_arb;
	hw_data->exit_arb = adf_exit_arb;
	hw_data->get_arb_mapping = adf_get_arbiter_mapping;
	hw_data->enable_ints = adf_enable_ints;
	hw_data->init_device = adf_init_device;
	hw_data->reset_device = adf_reset_flr;
	hw_data->restore_device = adf_dev_restore;
	hw_data->init_accel_units = adf_init_accel_units;
	hw_data->exit_accel_units = adf_exit_accel_units;
	hw_data->get_num_accel_units = get_num_accel_units;
	hw_data->configure_accel_units = adf_4xxx_configure_accel_units;
	hw_data->get_ring_to_svc_map = get_ring_to_svc_map;
	hw_data->get_ring_svc_map_data = get_ring_svc_map_data;
	hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK;
	hw_data->get_objs_num = get_objs_num;
	hw_data->get_obj_name = get_obj_name;
	hw_data->get_obj_cfg_ae_mask = get_obj_cfg_ae_mask;
	hw_data->get_service_type = adf_4xxx_get_service_type;
	hw_data->set_msix_rttable = set_msix_default_rttable;
	hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
	hw_data->disable_iov = adf_disable_sriov;
	hw_data->config_device = adf_config_device;
	hw_data->set_asym_rings_mask = adf_set_asym_rings_mask;
	hw_data->get_hb_clock = get_hb_clock;
	hw_data->int_timer_init = adf_int_timer_init;
	hw_data->int_timer_exit = adf_int_timer_exit;
	hw_data->get_heartbeat_status = adf_get_heartbeat_status;
	hw_data->get_ae_clock = get_ae_clock;
	hw_data->measure_clock = measure_clock;
	hw_data->query_storage_cap = 1;
	hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;

	switch (id) {
	case ADF_401XX_PCI_DEVICE_ID:
		hw_data->asym_ae_active_thd_mask = DEFAULT_401XX_ASYM_AE_MASK;
		break;
	case ADF_4XXX_PCI_DEVICE_ID:
	default:
		hw_data->asym_ae_active_thd_mask = DEFAULT_4XXX_ASYM_AE_MASK;
	}

	adf_gen4_init_hw_csr_info(&hw_data->csr_info);
	adf_gen4_init_pf_pfvf_ops(&hw_data->csr_info.pfvf_ops);
}

void
adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class->instances--;
}