/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2025 Intel Corporation */
#include <linux/iopoll.h>
#include <adf_accel_devices.h>
#include <adf_cfg.h>
#include <adf_common_drv.h>
#include <adf_dev_err.h>
#include <adf_pfvf_msg.h>
#include <adf_gen4_hw_data.h>
#include <adf_gen4_pfvf.h>
#include <adf_gen4_timer.h>
#include "adf_4xxx_hw_data.h"
#include "adf_heartbeat.h"
#include "icp_qat_fw_init_admin.h"
#include "icp_qat_hw.h"

#define ADF_CONST_TABLE_SIZE 1024

struct adf_fw_config {
	u32 ae_mask;
	char *obj_name;
};

/* Accel unit information */
static const struct adf_accel_unit adf_4xxx_au_a_ae[] = {
	{ 0x1, 0x1, 0xF, 0x1B, 4, ADF_ACCEL_SERVICE_NULL },
	{ 0x2, 0x1, 0xF0, 0x6C0, 4, ADF_ACCEL_SERVICE_NULL },
	{ 0x4, 0x1, 0x100, 0xF000, 1, ADF_ACCEL_ADMIN },
};

/* Worker thread to service arbiter mappings, one word per accel engine */
static u32 thrd_to_arb_map[ADF_4XXX_MAX_ACCELENGINES] = { 0x5555555, 0x5555555,
							  0x5555555, 0x5555555,
							  0xAAAAAAA, 0xAAAAAAA,
							  0xAAAAAAA, 0xAAAAAAA,
							  0x0 };

/* Masks representing ME thread-service mappings.
 * Thread 7 carries out Admin work and is thus
 * left out.
 */
static u8 default_active_thd_mask = 0x7F;
static u8 dc_me_active_thd_mask = 0x03;

static u32 thrd_to_arb_map_gen[ADF_4XXX_MAX_ACCELENGINES] = { 0 };

#define ADF_4XXX_ASYM_SYM                                                      \
	(ASYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT |                        \
	 ASYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT |                              \
	 SYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

#define ADF_4XXX_DC                                                            \
	(COMP | COMP << ADF_CFG_SERV_RING_PAIR_1_SHIFT |                       \
	 COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT |                              \
	 COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

#define ADF_4XXX_SYM                                                           \
	(SYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT |                         \
	 SYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT |                               \
	 SYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

#define ADF_4XXX_ASYM                                                          \
	(ASYM | ASYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT |                       \
	 ASYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT |                              \
	 ASYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

#define ADF_4XXX_ASYM_DC                                                       \
	(ASYM | ASYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT |                       \
	 COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT |                              \
	 COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

#define ADF_4XXX_SYM_DC                                                        \
	(SYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT |                         \
	 COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT |                              \
	 COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

#define ADF_4XXX_NA                                                            \
	(NA | NA << ADF_CFG_SERV_RING_PAIR_1_SHIFT |                           \
	 NA << ADF_CFG_SERV_RING_PAIR_2_SHIFT |                                \
	 NA << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

#define ADF_4XXX_DEFAULT_RING_TO_SRV_MAP ADF_4XXX_ASYM_SYM

struct adf_enabled_services {
	const char svcs_enabled[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	u16 rng_to_svc_msk;
};

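/*
 * Lookup table from the ServicesEnabled configuration string to the
 * ring-pair service mask. Token order is irrelevant: "dc;asym" and
 * "asym;dc" map to the same ring layout.
 */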
static struct adf_enabled_services adf_4xxx_svcs[] =
    { { "dc", ADF_4XXX_DC },
      { "sym", ADF_4XXX_SYM },
      { "asym", ADF_4XXX_ASYM },
      { "dc;asym", ADF_4XXX_ASYM_DC },
      { "asym;dc", ADF_4XXX_ASYM_DC },
      { "sym;dc", ADF_4XXX_SYM_DC },
      { "dc;sym", ADF_4XXX_SYM_DC },
      { "asym;sym", ADF_4XXX_ASYM_SYM },
      { "sym;asym", ADF_4XXX_ASYM_SYM },
      { "cy", ADF_4XXX_ASYM_SYM } };

static struct adf_hw_device_class adf_4xxx_class = {
	.name = ADF_4XXX_DEVICE_NAME,
	.type = DEV_4XXX,
	.instances = 0,
};

static u32
get_accel_mask(struct adf_accel_dev *accel_dev)
{
	return ADF_4XXX_ACCELERATORS_MASK;
}

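/*
 * Derive the enabled-AE mask from the fuse register: a set fuse bit
 * disables the corresponding accel engine, so the usable engines are
 * the inverse of the fuse value within the 4xxx AE mask.
 */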
static u32
get_ae_mask(struct adf_accel_dev *accel_dev)
{
	u32 fusectl4 = accel_dev->hw_device->fuses;

	return ~fusectl4 & ADF_4XXX_ACCELENGINES_MASK;
}

static void
adf_set_asym_rings_mask(struct adf_accel_dev *accel_dev)
{
	accel_dev->hw_device->asym_rings_mask = ADF_4XXX_DEF_ASYM_MASK;
}

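/*
 * Translate the user-supplied ServicesEnabled string into a ring-pair
 * service map by looking it up in adf_4xxx_svcs[]. Unknown strings are
 * rejected with EFAULT.
 */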
static int
get_ring_to_svc_map(struct adf_accel_dev *accel_dev, u16 *ring_to_svc_map)
{
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	u32 i = 0;

	*ring_to_svc_map = 0;
	/* Get the services enabled by the user */
	snprintf(key, sizeof(key), ADF_SERVICES_ENABLED);
	if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val))
		return EFAULT;

	for (i = 0; i < ARRAY_SIZE(adf_4xxx_svcs); i++) {
		if (!strncmp(val,
			     adf_4xxx_svcs[i].svcs_enabled,
			     ADF_CFG_MAX_KEY_LEN_IN_BYTES)) {
			*ring_to_svc_map = adf_4xxx_svcs[i].rng_to_svc_msk;
			return 0;
		}
	}

	device_printf(GET_DEV(accel_dev),
		      "Invalid services enabled: %s\n",
		      val);
	return EFAULT;
}

static u32
get_num_accels(struct adf_hw_device_data *self)
{
	return ADF_4XXX_MAX_ACCELERATORS;
}

static u32
get_num_aes(struct adf_hw_device_data *self)
{
	if (!self || !self->ae_mask)
		return 0;

	return hweight32(self->ae_mask);
}

static u32
get_misc_bar_id(struct adf_hw_device_data *self)
{
	return ADF_4XXX_PMISC_BAR;
}

static u32
get_etr_bar_id(struct adf_hw_device_data *self)
{
	return ADF_4XXX_ETR_BAR;
}

static u32
get_sram_bar_id(struct adf_hw_device_data *self)
{
	return ADF_4XXX_SRAM_BAR;
}

/*
 * The vector routing table is used to select the MSI-X entry to use for each
 * interrupt source.
 * The first ADF_4XXX_ETR_MAX_BANKS entries correspond to ring interrupts.
 * The final entry corresponds to VF2PF or error interrupts.
 * This vector table could be used to configure one MSI-X entry to be shared
 * between multiple interrupt sources.
 *
 * The default routing is set to have a one to one correspondence between the
 * interrupt source and the MSI-X entry used.
 */
static void
set_msix_default_rttable(struct adf_accel_dev *accel_dev)
{
	struct resource *csr;
	int i;

	csr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
	for (i = 0; i <= ADF_4XXX_ETR_MAX_BANKS; i++)
		ADF_CSR_WR(csr, ADF_4XXX_MSIX_RTTABLE_OFFSET(i), i);
}

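/*
 * Build the device capability mask from FUSECTL1. Capabilities start out
 * fully enabled and are removed whenever the fuse bit for the backing
 * slice (cipher, UCS, auth, SMx, PKE, compression) is set.
 */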
static u32
adf_4xxx_get_hw_cap(struct adf_accel_dev *accel_dev)
{
	device_t pdev = accel_dev->accel_pci_dev.pci_dev;
	u32 fusectl1;
	u32 capabilities_sym, capabilities_sym_cipher, capabilities_sym_auth,
	    capabilities_asym, capabilities_dc, capabilities_other;

	capabilities_other = ICP_ACCEL_CAPABILITIES_RL;

	/* Read accelerator capabilities mask */
	fusectl1 = pci_read_config(pdev, ADF_4XXX_FUSECTL1_OFFSET, 4);

	capabilities_sym_cipher = ICP_ACCEL_CAPABILITIES_HKDF |
	    ICP_ACCEL_CAPABILITIES_SM4 | ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
	    ICP_ACCEL_CAPABILITIES_AESGCM_SPC | ICP_ACCEL_CAPABILITIES_AES_V2;
	capabilities_sym_auth = ICP_ACCEL_CAPABILITIES_SM3 |
	    ICP_ACCEL_CAPABILITIES_SHA3 | ICP_ACCEL_CAPABILITIES_SHA3_EXT;

	/* A set bit in fusectl1 means the feature is OFF in this SKU */
	if (fusectl1 & ICP_ACCEL_4XXX_MASK_CIPHER_SLICE) {
		capabilities_sym_cipher &= ~ICP_ACCEL_CAPABILITIES_HKDF;
		capabilities_sym_cipher &= ~ICP_ACCEL_CAPABILITIES_SM4;
	}

	if (fusectl1 & ICP_ACCEL_4XXX_MASK_UCS_SLICE) {
		capabilities_sym_cipher &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
		capabilities_sym_cipher &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
		capabilities_sym_cipher &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
	}

	if (fusectl1 & ICP_ACCEL_4XXX_MASK_AUTH_SLICE) {
		capabilities_sym_auth &= ~ICP_ACCEL_CAPABILITIES_SM3;
		capabilities_sym_auth &= ~ICP_ACCEL_CAPABILITIES_SHA3;
		capabilities_sym_auth &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
	}

	if (fusectl1 & ICP_ACCEL_4XXX_MASK_SMX_SLICE) {
		capabilities_sym_cipher &= ~ICP_ACCEL_CAPABILITIES_SM4;
		capabilities_sym_auth &= ~ICP_ACCEL_CAPABILITIES_SM3;
	}

	if (capabilities_sym_cipher)
		capabilities_sym_cipher |= ICP_ACCEL_CAPABILITIES_CIPHER;

	if (capabilities_sym_auth)
		capabilities_sym_auth |= ICP_ACCEL_CAPABILITIES_AUTHENTICATION;

	capabilities_sym = capabilities_sym_cipher | capabilities_sym_auth;

	if (capabilities_sym)
		capabilities_sym |= ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;

	capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
	    ICP_ACCEL_CAPABILITIES_SM2 | ICP_ACCEL_CAPABILITIES_ECEDMONT;

	if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE) {
		capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
		capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2;
		capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
	}

	capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION |
	    ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
	    ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
	    ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;

	if (fusectl1 & ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE) {
		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION;
		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION;
		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
	}

	return capabilities_sym | capabilities_dc | capabilities_asym |
	    capabilities_other;
}

static u32
get_hb_clock(struct adf_hw_device_data *self)
{
	/*
	 * 4XXX uses the KPT counter for heartbeat
	 */
	return ADF_4XXX_KPT_COUNTER_FREQ;
}

static u32
get_ae_clock(struct adf_hw_device_data *self)
{
	/*
	 * Clock update interval is 16 ticks for qat_4xxx.
	 */
	return self->clock_frequency / 16;
}

static int
measure_clock(struct adf_accel_dev *accel_dev)
{
	u32 frequency;
	int ret = 0;

	ret = adf_dev_measure_clock(accel_dev,
				    &frequency,
				    ADF_4XXX_MIN_AE_FREQ,
				    ADF_4XXX_MAX_AE_FREQ);
	if (ret)
		return ret;

	accel_dev->hw_device->clock_frequency = frequency;
	return 0;
}

static int
adf_4xxx_configure_accel_units(struct adf_accel_dev *accel_dev)
{
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES] = { 0 };
	char val_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 };

	if (adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC))
		goto err;

	snprintf(key, sizeof(key), ADF_SERVICES_ENABLED);
	snprintf(val_str,
		 sizeof(val_str),
		 ADF_CFG_ASYM ADF_SERVICES_SEPARATOR ADF_CFG_SYM);

	if (adf_cfg_add_key_value_param(
		accel_dev, ADF_GENERAL_SEC, key, (void *)val_str, ADF_STR))
		goto err;

	return 0;
err:
	device_printf(GET_DEV(accel_dev), "Failed to configure accel units\n");
	return EINVAL;
}

static u32
get_num_accel_units(struct adf_hw_device_data *self)
{
	return ADF_4XXX_MAX_ACCELUNITS;
}

static void
get_accel_unit(struct adf_hw_device_data *self,
	       struct adf_accel_unit **accel_unit)
{
	memcpy(*accel_unit, adf_4xxx_au_a_ae, sizeof(adf_4xxx_au_a_ae));
}

static void
adf_exit_accel_unit_services(struct adf_accel_dev *accel_dev)
{
	if (accel_dev->au_info) {
		kfree(accel_dev->au_info->au);
		accel_dev->au_info->au = NULL;
		kfree(accel_dev->au_info);
		accel_dev->au_info = NULL;
	}
}

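/*
 * Parse the semicolon-separated ServicesEnabled string into a service
 * mask, decide how many AUs each service receives, and strip the
 * capability bits of any service that ends up without an AU.
 */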
static int
get_accel_unit_config(struct adf_accel_dev *accel_dev,
		      u8 *num_sym_au,
		      u8 *num_dc_au,
		      u8 *num_asym_au)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	u32 num_au = hw_data->get_num_accel_units(hw_data);
	/* One AU is allocated by default if a service is enabled */
	u32 alloc_au = 1;
	/* There's always one AU that is used for the Admin AE */
	u32 service_mask = ADF_ACCEL_ADMIN;
	char *token, *cur_str;
	u32 disabled_caps = 0;

	/* Get the services enabled by the user */
	snprintf(key, sizeof(key), ADF_SERVICES_ENABLED);
	if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val))
		return EFAULT;
	cur_str = val;
	token = strsep(&cur_str, ADF_SERVICES_SEPARATOR);
	while (token) {
		if (!strncmp(token, ADF_CFG_SYM, strlen(ADF_CFG_SYM)))
			service_mask |= ADF_ACCEL_CRYPTO;
		if (!strncmp(token, ADF_CFG_ASYM, strlen(ADF_CFG_ASYM)))
			service_mask |= ADF_ACCEL_ASYM;

		/* "cy" means both asym & sym crypto should be enabled.
		 * The hardware resource allocation is checked later.
		 */
		if (!strncmp(token, ADF_CFG_CY, strlen(ADF_CFG_CY)))
			service_mask |= ADF_ACCEL_ASYM | ADF_ACCEL_CRYPTO;
		if (!strncmp(token, ADF_SERVICE_DC, strlen(ADF_SERVICE_DC)))
			service_mask |= ADF_ACCEL_COMPRESSION;

		token = strsep(&cur_str, ADF_SERVICES_SEPARATOR);
	}

	/* Ensure the user doesn't enable more services than the device
	 * supports
	 */
	if (hweight32(service_mask) > num_au) {
		device_printf(GET_DEV(accel_dev),
			      "Can't enable more services than %d!\n",
			      num_au);
		return EFAULT;
	} else if (hweight32(service_mask) == 2) {
		/* Due to a hardware limitation, only two more AUs can be
		 * allocated besides the AU for the Admin AE
		 */
		alloc_au = 2;
	}

	if (service_mask & ADF_ACCEL_CRYPTO)
		*num_sym_au = alloc_au;
	if (service_mask & ADF_ACCEL_ASYM)
		*num_asym_au = alloc_au;
	if (service_mask & ADF_ACCEL_COMPRESSION)
		*num_dc_au = alloc_au;

	/* Update capabilities */
	if (!*num_sym_au || !(service_mask & ADF_ACCEL_CRYPTO)) {
		disabled_caps = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
		    ICP_ACCEL_CAPABILITIES_CIPHER |
		    ICP_ACCEL_CAPABILITIES_SHA3 |
		    ICP_ACCEL_CAPABILITIES_SHA3_EXT |
		    ICP_ACCEL_CAPABILITIES_HKDF | ICP_ACCEL_CAPABILITIES_SM3 |
		    ICP_ACCEL_CAPABILITIES_SM4 |
		    ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
		    ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
		    ICP_ACCEL_CAPABILITIES_AES_V2 |
		    ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
	}
	if (!*num_asym_au || !(service_mask & ADF_ACCEL_ASYM)) {
		disabled_caps |= ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
		    ICP_ACCEL_CAPABILITIES_ECEDMONT;
	}
	if (!*num_dc_au || !(service_mask & ADF_ACCEL_COMPRESSION)) {
		disabled_caps |= ICP_ACCEL_CAPABILITIES_COMPRESSION |
		    ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
		    ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
		    ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
		accel_dev->hw_device->extended_dc_capabilities = 0;
	}
	accel_dev->hw_device->accel_capabilities_mask =
	    adf_4xxx_get_hw_cap(accel_dev) & ~disabled_caps;

	hw_data->service_mask = service_mask;
	hw_data->service_to_load_mask = service_mask;

	return 0;
}

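/*
 * Allocate the accel-unit table and assign a service (asym, sym, or
 * compression) to each unassigned AU according to the parsed
 * ServicesEnabled configuration.
 */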
static int
adf_init_accel_unit_services(struct adf_accel_dev *accel_dev)
{
	u8 num_sym_au = 0, num_dc_au = 0, num_asym_au = 0;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 num_au = hw_data->get_num_accel_units(hw_data);
	u32 au_size = num_au * sizeof(struct adf_accel_unit);
	u8 i;

	if (get_accel_unit_config(
		accel_dev, &num_sym_au, &num_dc_au, &num_asym_au))
		return EFAULT;

	accel_dev->au_info = kzalloc(sizeof(*accel_dev->au_info), GFP_KERNEL);
	if (!accel_dev->au_info)
		return ENOMEM;

	accel_dev->au_info->au = kzalloc(au_size, GFP_KERNEL);
	if (!accel_dev->au_info->au) {
		kfree(accel_dev->au_info);
		accel_dev->au_info = NULL;
		return ENOMEM;
	}

	accel_dev->au_info->num_cy_au = num_sym_au;
	accel_dev->au_info->num_dc_au = num_dc_au;
	accel_dev->au_info->num_asym_au = num_asym_au;

	get_accel_unit(hw_data, &accel_dev->au_info->au);

	/* Enable ASYM accel units */
	for (i = 0; i < num_au && num_asym_au > 0; i++) {
		if (accel_dev->au_info->au[i].services ==
		    ADF_ACCEL_SERVICE_NULL) {
			accel_dev->au_info->au[i].services = ADF_ACCEL_ASYM;
			num_asym_au--;
		}
	}
	/* Enable SYM accel units */
	for (i = 0; i < num_au && num_sym_au > 0; i++) {
		if (accel_dev->au_info->au[i].services ==
		    ADF_ACCEL_SERVICE_NULL) {
			accel_dev->au_info->au[i].services = ADF_ACCEL_CRYPTO;
			num_sym_au--;
		}
	}
	/* Enable compression accel units */
	for (i = 0; i < num_au && num_dc_au > 0; i++) {
		if (accel_dev->au_info->au[i].services ==
		    ADF_ACCEL_SERVICE_NULL) {
			accel_dev->au_info->au[i].services =
			    ADF_ACCEL_COMPRESSION;
			num_dc_au--;
		}
	}
	accel_dev->au_info->dc_ae_msk |=
	    hw_data->get_obj_cfg_ae_mask(accel_dev, ADF_ACCEL_COMPRESSION);

	return 0;
}

static int
adf_init_accel_units(struct adf_accel_dev *accel_dev)
{
	return adf_init_accel_unit_services(accel_dev);
}

static void
adf_exit_accel_units(struct adf_accel_dev *accel_dev)
{
	/* reset the AU service */
	adf_exit_accel_unit_services(accel_dev);
}

static const char *
get_obj_name_4xxx(struct adf_accel_dev *accel_dev,
		  enum adf_accel_unit_services service)
{
	switch (service) {
	case ADF_ACCEL_ASYM:
		return ADF_4XXX_ASYM_OBJ;
	case ADF_ACCEL_CRYPTO:
		return ADF_4XXX_SYM_OBJ;
	case ADF_ACCEL_COMPRESSION:
		return ADF_4XXX_DC_OBJ;
	case ADF_ACCEL_ADMIN:
		return ADF_4XXX_ADMIN_OBJ;
	default:
		return NULL;
	}
}

static const char *
get_obj_name_402xx(struct adf_accel_dev *accel_dev,
		   enum adf_accel_unit_services service)
{
	switch (service) {
	case ADF_ACCEL_ASYM:
		return ADF_402XX_ASYM_OBJ;
	case ADF_ACCEL_CRYPTO:
		return ADF_402XX_SYM_OBJ;
	case ADF_ACCEL_COMPRESSION:
		return ADF_402XX_DC_OBJ;
	case ADF_ACCEL_ADMIN:
		return ADF_402XX_ADMIN_OBJ;
	default:
		return NULL;
	}
}

static uint32_t
get_objs_num(struct adf_accel_dev *accel_dev)
{
	return ADF_4XXX_MAX_OBJ;
}

static uint32_t
get_obj_cfg_ae_mask(struct adf_accel_dev *accel_dev,
		    enum adf_accel_unit_services service)
{
	u32 ae_mask = 0;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 num_au = hw_data->get_num_accel_units(hw_data);
	struct adf_accel_unit *accel_unit = accel_dev->au_info->au;
	u32 i = 0;

	if (service == ADF_ACCEL_SERVICE_NULL)
		return 0;

	for (i = 0; i < num_au; i++) {
		if (accel_unit[i].services == service)
			ae_mask |= accel_unit[i].ae_mask;
	}

	return ae_mask;
}

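/*
 * Pick the service whose firmware object should be loaded next. Worker
 * services are consumed from the highest-numbered AU down; the admin AE
 * is deliberately left for the final load.
 */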
static enum adf_accel_unit_services
adf_4xxx_get_service_type(struct adf_accel_dev *accel_dev, s32 obj_num)
{
	struct adf_accel_unit *accel_unit;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u8 num_au = hw_data->get_num_accel_units(hw_data);
	int i;

	if (!hw_data->service_to_load_mask)
		return ADF_ACCEL_SERVICE_NULL;

	if (accel_dev->au_info && accel_dev->au_info->au)
		accel_unit = accel_dev->au_info->au;
	else
		return ADF_ACCEL_SERVICE_NULL;

	for (i = num_au - 2; i >= 0; i--) {
		if (hw_data->service_to_load_mask & accel_unit[i].services) {
			hw_data->service_to_load_mask &=
			    ~accel_unit[i].services;
			return accel_unit[i].services;
		}
	}

	/* admin AE should be loaded last */
	if (hw_data->service_to_load_mask & accel_unit[num_au - 1].services) {
		hw_data->service_to_load_mask &=
		    ~accel_unit[num_au - 1].services;
		return accel_unit[num_au - 1].services;
	}

	return ADF_ACCEL_SERVICE_NULL;
}

static void
get_ring_svc_map_data(int ring_pair_index,
		      u16 ring_to_svc_map,
		      u8 *serv_type,
		      int *ring_index,
		      int *num_rings_per_srv,
		      int bundle_num)
{
	*serv_type =
	    GET_SRV_TYPE(ring_to_svc_map, bundle_num % ADF_CFG_NUM_SERVICES);
	*ring_index = 0;
	*num_rings_per_srv = ADF_4XXX_NUM_RINGS_PER_BANK / 2;
}

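/*
 * Query the firmware's extended compression capabilities. The request
 * is sent to the lowest AE of a compression AU; if no AU provides
 * compression, the query is skipped and success is returned.
 */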
static int
adf_get_dc_extcapabilities(struct adf_accel_dev *accel_dev, u32 *capabilities)
{
	struct icp_qat_fw_init_admin_req req;
	struct icp_qat_fw_init_admin_resp resp;
	u8 i;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u8 num_au = hw_data->get_num_accel_units(hw_data);
	u32 first_dc_ae = 0;

	for (i = 0; i < num_au; i++) {
		if (accel_dev->au_info->au[i].services &
		    ADF_ACCEL_COMPRESSION) {
			first_dc_ae = accel_dev->au_info->au[i].ae_mask;
			/* Isolate the lowest set bit: the first DC AE */
			first_dc_ae &= ~(first_dc_ae - 1);
		}
	}

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));
	req.cmd_id = ICP_QAT_FW_COMP_CAPABILITY_GET;

	if (likely(first_dc_ae)) {
		if (adf_send_admin(accel_dev, &req, &resp, first_dc_ae) ||
		    resp.status) {
			*capabilities = 0;
			return EFAULT;
		}

		*capabilities = resp.extended_features;
	}

	return 0;
}

static int
adf_get_fw_status(struct adf_accel_dev *accel_dev,
		  u8 *major,
		  u8 *minor,
		  u8 *patch)
{
	struct icp_qat_fw_init_admin_req req;
	struct icp_qat_fw_init_admin_resp resp;
	u32 ae_mask = 1;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));
	req.cmd_id = ICP_QAT_FW_STATUS_GET;

	if (adf_send_admin(accel_dev, &req, &resp, ae_mask))
		return EFAULT;

	*major = resp.version_major_num;
	*minor = resp.version_minor_num;
	*patch = resp.version_patch_num;

	return 0;
}

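/*
 * Run the admin init sequence: push the constants table, send INIT_ME to
 * every AE that backs an enabled service, arm the heartbeat timer, then
 * fetch the extended DC capabilities and the firmware version.
 */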
static int
adf_4xxx_send_admin_init(struct adf_accel_dev *accel_dev)
{
	int ret = 0;
	struct icp_qat_fw_init_admin_req req;
	struct icp_qat_fw_init_admin_resp resp;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 ae_mask = hw_data->ae_mask;
	u32 admin_ae_mask = hw_data->admin_ae_mask;
	u8 num_au = hw_data->get_num_accel_units(hw_data);
	u8 i;
	u32 dc_capabilities = 0;

	for (i = 0; i < num_au; i++) {
		if (accel_dev->au_info->au[i].services ==
		    ADF_ACCEL_SERVICE_NULL)
			ae_mask &= ~accel_dev->au_info->au[i].ae_mask;

		if (accel_dev->au_info->au[i].services != ADF_ACCEL_ADMIN)
			admin_ae_mask &= ~accel_dev->au_info->au[i].ae_mask;
	}

	if (!accel_dev->admin) {
		device_printf(GET_DEV(accel_dev), "adf_admin not available\n");
		return EFAULT;
	}

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));

	req.cmd_id = ICP_QAT_FW_CONSTANTS_CFG;
	req.init_cfg_sz = ADF_CONST_TABLE_SIZE;
	req.init_cfg_ptr = accel_dev->admin->const_tbl_addr;
	if (adf_send_admin(accel_dev, &req, &resp, admin_ae_mask)) {
		device_printf(GET_DEV(accel_dev),
			      "Error sending constants config message\n");
		return EFAULT;
	}

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));
	req.cmd_id = ICP_QAT_FW_INIT_ME;
#ifdef QAT_DISABLE_SAFE_DC_MODE
	if (accel_dev->disable_safe_dc_mode)
		req.fw_flags = ICP_QAT_FW_INIT_DISABLE_SAFE_DC_MODE_FLAG;
#endif /* QAT_DISABLE_SAFE_DC_MODE */
	if (adf_send_admin(accel_dev, &req, &resp, ae_mask)) {
		device_printf(GET_DEV(accel_dev),
			      "Error sending init message\n");
		return EFAULT;
	}

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));
	req.cmd_id = ICP_QAT_FW_HEARTBEAT_TIMER_SET;
	req.init_cfg_ptr = accel_dev->admin->phy_hb_addr;
	if (adf_get_hb_timer(accel_dev, &req.heartbeat_ticks))
		return EINVAL;

	if (adf_send_admin(accel_dev, &req, &resp, ae_mask))
		device_printf(GET_DEV(accel_dev),
			      "Heartbeat is not supported\n");

	ret = adf_get_dc_extcapabilities(accel_dev, &dc_capabilities);
	if (unlikely(ret)) {
		device_printf(GET_DEV(accel_dev),
			      "Could not get FW ext. capabilities\n");
	}

	accel_dev->hw_device->extended_dc_capabilities = dc_capabilities;

	adf_get_fw_status(accel_dev,
			  &accel_dev->fw_versions.fw_version_major,
			  &accel_dev->fw_versions.fw_version_minor,
			  &accel_dev->fw_versions.fw_version_patch);

	device_printf(GET_DEV(accel_dev),
		      "FW version: %d.%d.%d\n",
		      accel_dev->fw_versions.fw_version_major,
		      accel_dev->fw_versions.fw_version_minor,
		      accel_dev->fw_versions.fw_version_patch);

	return ret;
}

static enum dev_sku_info
get_sku(struct adf_hw_device_data *self)
{
	return DEV_SKU_1;
}

static struct adf_accel_unit *
get_au_by_ae(struct adf_accel_dev *accel_dev, int ae_num)
{
	int i = 0;
	struct adf_accel_unit *accel_unit = accel_dev->au_info->au;

	if (!accel_unit)
		return NULL;

	for (i = 0; i < ADF_4XXX_MAX_ACCELUNITS; i++)
		if (accel_unit[i].ae_mask & BIT(ae_num))
			return &accel_unit[i];

	return NULL;
}

static bool
check_accel_unit_service(enum adf_accel_unit_services au_srv,
			 enum adf_cfg_service_type ring_srv)
{
	if (au_srv == ADF_ACCEL_SERVICE_NULL && ring_srv == NA)
		return true;
	if ((au_srv & ADF_ACCEL_COMPRESSION) && ring_srv == COMP)
		return true;
	if ((au_srv & ADF_ACCEL_ASYM) && ring_srv == ASYM)
		return true;
	if ((au_srv & ADF_ACCEL_CRYPTO) && ring_srv == SYM)
		return true;

	return false;
}

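/*
 * Build a non-default thread-to-arbiter map: for each engine, determine
 * which enabled services its AU provides, then replicate that service
 * mask into the arbiter word at each active worker thread's slot.
 */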
static void
adf_4xxx_cfg_gen_dispatch_arbiter(struct adf_accel_dev *accel_dev,
				  u32 *thrd_to_arb_map_gen)
{
	struct adf_accel_unit *au = NULL;
	int engine = 0;
	int thread = 0;
	int service;
	u16 ena_srv_mask;
	u16 service_type;
	u32 service_mask;
	unsigned long thd_srv_mask = default_active_thd_mask;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;

	ena_srv_mask = accel_dev->hw_device->ring_to_svc_map;
	/* If ring_to_svc_map is unchanged, return the default arbiter map */
	if (ena_srv_mask == ADF_4XXX_DEFAULT_RING_TO_SRV_MAP) {
		memcpy(thrd_to_arb_map_gen,
		       thrd_to_arb_map,
		       sizeof(thrd_to_arb_map_gen[0]) *
			   ADF_4XXX_MAX_ACCELENGINES);
		return;
	}

	for (engine = 0; engine < ADF_4XXX_MAX_ACCELENGINES - 1; engine++) {
		thrd_to_arb_map_gen[engine] = 0;
		service_mask = 0;
		au = get_au_by_ae(accel_dev, engine);
		if (!au)
			continue;

		for (service = 0; service < ADF_CFG_MAX_SERVICES; service++) {
			service_type = GET_SRV_TYPE(ena_srv_mask, service);
			if (check_accel_unit_service(au->services,
						     service_type))
				service_mask |= BIT(service);
		}

		if (au->services == ADF_ACCEL_COMPRESSION)
			thd_srv_mask = dc_me_active_thd_mask;
		else if (au->services == ADF_ACCEL_ASYM)
			thd_srv_mask = hw_data->asym_ae_active_thd_mask;
		else
			thd_srv_mask = default_active_thd_mask;

		for_each_set_bit(thread, &thd_srv_mask, 8)
		{
			thrd_to_arb_map_gen[engine] |=
			    (service_mask << (ADF_CFG_MAX_SERVICES * thread));
		}
	}
}

static void
adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
			u32 const **arb_map_config)
{
	int i;
	struct adf_hw_device_data *hw_device = accel_dev->hw_device;

	/* Clear the map entry of every engine that is not enabled */
	for (i = 1; i < ADF_4XXX_MAX_ACCELENGINES; i++) {
		if (~hw_device->ae_mask & (1 << i))
			thrd_to_arb_map[i] = 0;
	}
	adf_4xxx_cfg_gen_dispatch_arbiter(accel_dev, thrd_to_arb_map_gen);
	*arb_map_config = thrd_to_arb_map_gen;
}

static void
get_arb_info(struct arb_info *arb_info)
{
	arb_info->wrk_cfg_offset = ADF_4XXX_ARB_CONFIG;
	arb_info->arbiter_offset = ADF_4XXX_ARB_OFFSET;
	arb_info->wrk_thd_2_srv_arb_map = ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET;
}

static void
get_admin_info(struct admin_info *admin_csrs_info)
{
	admin_csrs_info->mailbox_offset = ADF_4XXX_MAILBOX_BASE_OFFSET;
	admin_csrs_info->admin_msg_ur = ADF_4XXX_ADMINMSGUR_OFFSET;
	admin_csrs_info->admin_msg_lr = ADF_4XXX_ADMINMSGLR_OFFSET;
}

static void
adf_enable_error_correction(struct adf_accel_dev *accel_dev)
{
	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR];
	struct resource *csr = misc_bar->virt_addr;

	/* Enable all in errsou3 except VFLR notification on host */
	ADF_CSR_WR(csr, ADF_4XXX_ERRMSK3, ADF_4XXX_VFLNOTIFY);
}

static void
adf_enable_ints(struct adf_accel_dev *accel_dev)
{
	struct resource *addr;

	addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;

	/* Enable bundle interrupts */
	ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET, 0);
	ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET, 0);

	/* Enable misc interrupts */
	ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_MASK_OFFSET, 0);
}

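/*
 * Power up the device: mask the PM interrupt, assert DRV_ACTIVE, and
 * poll the PM status register until the hardware reports the init state.
 */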
static int
adf_init_device(struct adf_accel_dev *accel_dev)
{
	struct resource *addr;
	u32 status;
	u32 csr;
	int ret;

	addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;

	/* Temporarily mask PM interrupt */
	csr = ADF_CSR_RD(addr, ADF_4XXX_ERRMSK2);
	csr |= ADF_4XXX_PM_SOU;
	ADF_CSR_WR(addr, ADF_4XXX_ERRMSK2, csr);

	/* Set DRV_ACTIVE bit to power up the device */
	ADF_CSR_WR(addr, ADF_4XXX_PM_INTERRUPT, ADF_4XXX_PM_DRV_ACTIVE);

	/* Poll status register to make sure the device is powered up */
	status = 0;
	ret = read_poll_timeout(ADF_CSR_RD,
				status,
				status & ADF_4XXX_PM_INIT_STATE,
				ADF_4XXX_PM_POLL_DELAY_US,
				ADF_4XXX_PM_POLL_TIMEOUT_US,
				true,
				addr,
				ADF_4XXX_PM_STATUS);
	if (ret)
		device_printf(GET_DEV(accel_dev),
			      "Failed to power up the device\n");

	return ret;
}

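/*
 * Populate the hw_data ops table shared by the 4xxx family; the device
 * id selects the firmware images and the asym thread mask for the
 * 401xx/402xx variants.
 */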
void
adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 id)
{
	hw_data->dev_class = &adf_4xxx_class;
	hw_data->instance_id = adf_4xxx_class.instances++;
	hw_data->num_banks = ADF_4XXX_ETR_MAX_BANKS;
	hw_data->num_rings_per_bank = ADF_4XXX_NUM_RINGS_PER_BANK;
	hw_data->num_accel = ADF_4XXX_MAX_ACCELERATORS;
	hw_data->num_engines = ADF_4XXX_MAX_ACCELENGINES;
	hw_data->num_logical_accel = 1;
	hw_data->tx_rx_gap = ADF_4XXX_RX_RINGS_OFFSET;
	hw_data->tx_rings_mask = ADF_4XXX_TX_RINGS_MASK;
	hw_data->alloc_irq = adf_isr_resource_alloc;
	hw_data->free_irq = adf_isr_resource_free;
	hw_data->enable_error_correction = adf_enable_error_correction;
	hw_data->get_accel_mask = get_accel_mask;
	hw_data->get_ae_mask = get_ae_mask;
	hw_data->get_num_accels = get_num_accels;
	hw_data->get_num_aes = get_num_aes;
	hw_data->get_sram_bar_id = get_sram_bar_id;
	hw_data->get_etr_bar_id = get_etr_bar_id;
	hw_data->get_misc_bar_id = get_misc_bar_id;
	hw_data->get_arb_info = get_arb_info;
	hw_data->get_admin_info = get_admin_info;
	hw_data->get_accel_cap = adf_4xxx_get_hw_cap;
	hw_data->clock_frequency = ADF_4XXX_AE_FREQ;
	hw_data->get_sku = get_sku;
	hw_data->heartbeat_ctr_num = ADF_NUM_HB_CNT_PER_AE;
	switch (id) {
	case ADF_402XX_PCI_DEVICE_ID:
		hw_data->fw_name = ADF_402XX_FW;
		hw_data->fw_mmp_name = ADF_402XX_MMP;
		hw_data->asym_ae_active_thd_mask = DEFAULT_4XXX_ASYM_AE_MASK;
		break;
	case ADF_401XX_PCI_DEVICE_ID:
		hw_data->fw_name = ADF_4XXX_FW;
		hw_data->fw_mmp_name = ADF_4XXX_MMP;
		hw_data->asym_ae_active_thd_mask = DEFAULT_401XX_ASYM_AE_MASK;
		break;
	default:
		hw_data->fw_name = ADF_4XXX_FW;
		hw_data->fw_mmp_name = ADF_4XXX_MMP;
		hw_data->asym_ae_active_thd_mask = DEFAULT_4XXX_ASYM_AE_MASK;
	}
	hw_data->init_admin_comms = adf_init_admin_comms;
	hw_data->exit_admin_comms = adf_exit_admin_comms;
	hw_data->send_admin_init = adf_4xxx_send_admin_init;
	hw_data->init_arb = adf_init_gen2_arb;
	hw_data->exit_arb = adf_exit_arb;
	hw_data->get_arb_mapping = adf_get_arbiter_mapping;
	hw_data->enable_ints = adf_enable_ints;
	hw_data->init_device = adf_init_device;
	hw_data->reset_device = adf_reset_flr;
	hw_data->restore_device = adf_dev_restore;
	hw_data->init_accel_units = adf_init_accel_units;
	hw_data->exit_accel_units = adf_exit_accel_units;
	hw_data->get_num_accel_units = get_num_accel_units;
	hw_data->configure_accel_units = adf_4xxx_configure_accel_units;
	hw_data->get_ring_to_svc_map = get_ring_to_svc_map;
	hw_data->get_ring_svc_map_data = get_ring_svc_map_data;
	hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK;
	hw_data->get_objs_num = get_objs_num;
	switch (id) {
	case ADF_402XX_PCI_DEVICE_ID:
		hw_data->get_obj_name = get_obj_name_402xx;
		break;
	default:
		hw_data->get_obj_name = get_obj_name_4xxx;
	}
	hw_data->get_obj_cfg_ae_mask = get_obj_cfg_ae_mask;
	hw_data->get_service_type = adf_4xxx_get_service_type;
	hw_data->set_msix_rttable = set_msix_default_rttable;
	hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
	hw_data->disable_iov = adf_disable_sriov;
	hw_data->config_device = adf_config_device;
	hw_data->set_asym_rings_mask = adf_set_asym_rings_mask;
	hw_data->get_hb_clock = get_hb_clock;
	hw_data->int_timer_init = adf_int_timer_init;
	hw_data->int_timer_exit = adf_int_timer_exit;
	hw_data->pre_reset = adf_dev_pre_reset;
	hw_data->post_reset = adf_dev_post_reset;
	hw_data->disable_arb = adf_disable_arb;
	hw_data->get_heartbeat_status = adf_get_heartbeat_status;
	hw_data->get_ae_clock = get_ae_clock;
	hw_data->measure_clock = measure_clock;
	hw_data->query_storage_cap = 1;
	hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;

	adf_gen4_init_hw_csr_info(&hw_data->csr_info);
	adf_gen4_init_pf_pfvf_ops(&hw_data->csr_info.pfvf_ops);
}

void
adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class->instances--;
}