// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 - 2021 Intel Corporation */
#include <linux/iopoll.h>
#include <adf_accel_devices.h>
#include <adf_admin.h>
#include <adf_cfg.h>
#include <adf_cfg_services.h>
#include <adf_clock.h>
#include <adf_common_drv.h>
#include <adf_fw_config.h>
#include <adf_gen4_config.h>
#include <adf_gen4_dc.h>
#include <adf_gen4_hw_csr_data.h>
#include <adf_gen4_hw_data.h>
#include <adf_gen4_pfvf.h>
#include <adf_gen4_pm.h>
#include <adf_gen4_ras.h>
#include <adf_gen4_timer.h>
#include <adf_gen4_tl.h>
#include <adf_gen4_vf_mig.h>
#include "adf_4xxx_hw_data.h"
#include "icp_qat_hw.h"

#define ADF_AE_GROUP_0		GENMASK(3, 0)
#define ADF_AE_GROUP_1		GENMASK(7, 4)
#define ADF_AE_GROUP_2		BIT(8)

#define ENA_THD_MASK_ASYM	GENMASK(1, 0)
#define ENA_THD_MASK_ASYM_401XX	GENMASK(5, 0)
#define ENA_THD_MASK_SYM	GENMASK(6, 0)
#define ENA_THD_MASK_DC		GENMASK(1, 0)

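/* Names of the loadable firmware (UOF) objects, indexed by object type */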
static const char * const adf_4xxx_fw_objs[] = {
	[ADF_FW_SYM_OBJ] =  ADF_4XXX_SYM_OBJ,
	[ADF_FW_ASYM_OBJ] =  ADF_4XXX_ASYM_OBJ,
	[ADF_FW_DC_OBJ] =  ADF_4XXX_DC_OBJ,
	[ADF_FW_ADMIN_OBJ] = ADF_4XXX_ADMIN_OBJ,
};

static const char * const adf_402xx_fw_objs[] = {
	[ADF_FW_SYM_OBJ] =  ADF_402XX_SYM_OBJ,
	[ADF_FW_ASYM_OBJ] =  ADF_402XX_ASYM_OBJ,
	[ADF_FW_DC_OBJ] =  ADF_402XX_DC_OBJ,
	[ADF_FW_ADMIN_OBJ] = ADF_402XX_ADMIN_OBJ,
};

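/*
 * Firmware-to-accel-engine assignments for each supported service
 * configuration. Every entry loads one firmware object on a group of accel
 * engines; the admin firmware always runs on AE group 2 (engine 8).
 */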
static const struct adf_fw_config adf_fw_cy_config[] = {
	{ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
	{ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ},
	{ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
};

static const struct adf_fw_config adf_fw_dc_config[] = {
	{ADF_AE_GROUP_1, ADF_FW_DC_OBJ},
	{ADF_AE_GROUP_0, ADF_FW_DC_OBJ},
	{ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
};

static const struct adf_fw_config adf_fw_sym_config[] = {
	{ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
	{ADF_AE_GROUP_0, ADF_FW_SYM_OBJ},
	{ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
};

static const struct adf_fw_config adf_fw_asym_config[] = {
	{ADF_AE_GROUP_1, ADF_FW_ASYM_OBJ},
	{ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ},
	{ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
};

static const struct adf_fw_config adf_fw_asym_dc_config[] = {
	{ADF_AE_GROUP_1, ADF_FW_ASYM_OBJ},
	{ADF_AE_GROUP_0, ADF_FW_DC_OBJ},
	{ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
};

static const struct adf_fw_config adf_fw_sym_dc_config[] = {
	{ADF_AE_GROUP_1, ADF_FW_SYM_OBJ},
	{ADF_AE_GROUP_0, ADF_FW_DC_OBJ},
	{ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
};

static const struct adf_fw_config adf_fw_dcc_config[] = {
	{ADF_AE_GROUP_1, ADF_FW_DC_OBJ},
	{ADF_AE_GROUP_0, ADF_FW_SYM_OBJ},
	{ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ},
};

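/*
 * All service configurations must have the same number of entries, as
 * uof_get_num_objs() reports ARRAY_SIZE(adf_fw_cy_config) regardless of the
 * service that is enabled.
 */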
static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_dc_config));
static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_sym_config));
static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_asym_config));
static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_asym_dc_config));
static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_sym_dc_config));
static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_dcc_config));

static struct adf_hw_device_class adf_4xxx_class = {
	.name = ADF_4XXX_DEVICE_NAME,
	.type = DEV_4XXX,
	.instances = 0,
};

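/* Fuse bits mark disabled engines; invert to get the enabled AE mask */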
static u32 get_ae_mask(struct adf_hw_device_data *self)
{
	u32 me_disable = self->fuses;

	return ~me_disable & ADF_4XXX_ACCELENGINES_MASK;
}

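/*
 * Build the sym, asym and compression capability masks from the FUSECTL1
 * register, then return the subset matching the enabled service(s).
 */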
static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
{
	struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
	u32 capabilities_sym, capabilities_asym, capabilities_dc;
	u32 capabilities_dcc;
	u32 fusectl1;

	/* Read accelerator capabilities mask */
	pci_read_config_dword(pdev, ADF_GEN4_FUSECTL1_OFFSET, &fusectl1);

	capabilities_sym = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
			  ICP_ACCEL_CAPABILITIES_CIPHER |
			  ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
			  ICP_ACCEL_CAPABILITIES_SHA3 |
			  ICP_ACCEL_CAPABILITIES_SHA3_EXT |
			  ICP_ACCEL_CAPABILITIES_HKDF |
			  ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
			  ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
			  ICP_ACCEL_CAPABILITIES_SM3 |
			  ICP_ACCEL_CAPABILITIES_SM4 |
			  ICP_ACCEL_CAPABILITIES_AES_V2;

	/* A set bit in fusectl1 means the feature is OFF in this SKU */
	if (fusectl1 & ICP_ACCEL_GEN4_MASK_CIPHER_SLICE) {
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_HKDF;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
	}

	if (fusectl1 & ICP_ACCEL_GEN4_MASK_UCS_SLICE) {
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
	}

	if (fusectl1 & ICP_ACCEL_GEN4_MASK_AUTH_SLICE) {
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
	}

	if (fusectl1 & ICP_ACCEL_GEN4_MASK_SMX_SLICE) {
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM3;
		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM4;
	}

	capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
			  ICP_ACCEL_CAPABILITIES_CIPHER |
			  ICP_ACCEL_CAPABILITIES_SM2 |
			  ICP_ACCEL_CAPABILITIES_ECEDMONT;

	if (fusectl1 & ICP_ACCEL_GEN4_MASK_PKE_SLICE) {
		capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
		capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2;
		capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
	}

	capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION |
			  ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
			  ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
			  ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;

	if (fusectl1 & ICP_ACCEL_GEN4_MASK_COMPRESS_SLICE) {
		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION;
		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION;
		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
	}

	switch (adf_get_service_enabled(accel_dev)) {
	case SVC_CY:
	case SVC_CY2:
		return capabilities_sym | capabilities_asym;
	case SVC_DC:
		return capabilities_dc;
	case SVC_DCC:
		/*
		 * Sym capabilities are available for chaining operations,
		 * but sym crypto instances cannot be supported
		 */
		capabilities_dcc = capabilities_dc | capabilities_sym;
		capabilities_dcc &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
		return capabilities_dcc;
	case SVC_SYM:
		return capabilities_sym;
	case SVC_ASYM:
		return capabilities_asym;
	case SVC_ASYM_DC:
	case SVC_DC_ASYM:
		return capabilities_asym | capabilities_dc;
	case SVC_SYM_DC:
	case SVC_DC_SYM:
		return capabilities_sym | capabilities_dc;
	default:
		return 0;
	}
}

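/*
 * Generate the thread to arbiter mapping for the enabled service. On failure
 * a warning is logged and the mapping currently held in hw_data is returned.
 */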
static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
{
	if (adf_gen4_init_thd2arb_map(accel_dev))
		dev_warn(&GET_DEV(accel_dev),
			 "Failed to generate thread to arbiter mapping");

	return GET_HW_DATA(accel_dev)->thd_to_arb_map;
}

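/*
 * Rate limiting parameters for this device: token bucket CSR offsets, PCIe
 * scale factors, slice reference and per-service maximum throughput.
 */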
static void adf_init_rl_data(struct adf_rl_hw_data *rl_data)
{
	rl_data->pciout_tb_offset = ADF_GEN4_RL_TOKEN_PCIEOUT_BUCKET_OFFSET;
	rl_data->pciin_tb_offset = ADF_GEN4_RL_TOKEN_PCIEIN_BUCKET_OFFSET;
	rl_data->r2l_offset = ADF_GEN4_RL_R2L_OFFSET;
	rl_data->l2c_offset = ADF_GEN4_RL_L2C_OFFSET;
	rl_data->c2s_offset = ADF_GEN4_RL_C2S_OFFSET;

	rl_data->pcie_scale_div = ADF_4XXX_RL_PCIE_SCALE_FACTOR_DIV;
	rl_data->pcie_scale_mul = ADF_4XXX_RL_PCIE_SCALE_FACTOR_MUL;
	rl_data->dcpr_correction = ADF_4XXX_RL_DCPR_CORRECTION;
	rl_data->max_tp[ADF_SVC_ASYM] = ADF_4XXX_RL_MAX_TP_ASYM;
	rl_data->max_tp[ADF_SVC_SYM] = ADF_4XXX_RL_MAX_TP_SYM;
	rl_data->max_tp[ADF_SVC_DC] = ADF_4XXX_RL_MAX_TP_DC;
	rl_data->scan_interval = ADF_4XXX_RL_SCANS_PER_SEC;
	rl_data->scale_ref = ADF_4XXX_RL_SLICE_REF;
}

static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev)
{
	return ARRAY_SIZE(adf_fw_cy_config);
}

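/*
 * Select the AE-group/firmware-object table that matches the configured
 * services; returns NULL for an unsupported configuration.
 */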
static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev)
{
	switch (adf_get_service_enabled(accel_dev)) {
	case SVC_CY:
	case SVC_CY2:
		return adf_fw_cy_config;
	case SVC_DC:
		return adf_fw_dc_config;
	case SVC_DCC:
		return adf_fw_dcc_config;
	case SVC_SYM:
		return adf_fw_sym_config;
	case SVC_ASYM:
		return adf_fw_asym_config;
	case SVC_ASYM_DC:
	case SVC_DC_ASYM:
		return adf_fw_asym_dc_config;
	case SVC_SYM_DC:
	case SVC_DC_SYM:
		return adf_fw_sym_dc_config;
	default:
		return NULL;
	}
}

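/* Map an accel engine group mask to its ring pair group */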
static int get_rp_group(struct adf_accel_dev *accel_dev, u32 ae_mask)
{
	switch (ae_mask) {
	case ADF_AE_GROUP_0:
		return RP_GROUP_0;
	case ADF_AE_GROUP_1:
		return RP_GROUP_1;
	default:
		dev_dbg(&GET_DEV(accel_dev), "ae_mask not recognized");
		return -EINVAL;
	}
}

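/*
 * Enabled-thread mask for the firmware object at position obj_num. The 401xx
 * variant below differs only in the wider mask used for the asym firmware.
 */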
static u32 get_ena_thd_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
{
	const struct adf_fw_config *fw_config;

	if (obj_num >= uof_get_num_objs(accel_dev))
		return ADF_GEN4_ENA_THD_MASK_ERROR;

	fw_config = get_fw_config(accel_dev);
	if (!fw_config)
		return ADF_GEN4_ENA_THD_MASK_ERROR;

	switch (fw_config[obj_num].obj) {
	case ADF_FW_ASYM_OBJ:
		return ENA_THD_MASK_ASYM;
	case ADF_FW_SYM_OBJ:
		return ENA_THD_MASK_SYM;
	case ADF_FW_DC_OBJ:
		return ENA_THD_MASK_DC;
	default:
		return ADF_GEN4_ENA_THD_MASK_ERROR;
	}
}

static u32 get_ena_thd_mask_401xx(struct adf_accel_dev *accel_dev, u32 obj_num)
{
	const struct adf_fw_config *fw_config;

	if (obj_num >= uof_get_num_objs(accel_dev))
		return ADF_GEN4_ENA_THD_MASK_ERROR;

	fw_config = get_fw_config(accel_dev);
	if (!fw_config)
		return ADF_GEN4_ENA_THD_MASK_ERROR;

	switch (fw_config[obj_num].obj) {
	case ADF_FW_ASYM_OBJ:
		return ENA_THD_MASK_ASYM_401XX;
	case ADF_FW_SYM_OBJ:
		return ENA_THD_MASK_SYM;
	case ADF_FW_DC_OBJ:
		return ENA_THD_MASK_DC;
	default:
		return ADF_GEN4_ENA_THD_MASK_ERROR;
	}
}

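/*
 * Resolve the firmware image name for obj_num from the per-SKU name table.
 * Returns NULL if no firmware configuration applies or the id is out of range.
 */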
static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num,
				const char * const fw_objs[], int num_objs)
{
	const struct adf_fw_config *fw_config;
	int id;

	fw_config = get_fw_config(accel_dev);
	if (fw_config)
		id = fw_config[obj_num].obj;
	else
		id = -EINVAL;

	if (id < 0 || id > num_objs)
		return NULL;

	return fw_objs[id];
}

static const char *uof_get_name_4xxx(struct adf_accel_dev *accel_dev, u32 obj_num)
{
	int num_fw_objs = ARRAY_SIZE(adf_4xxx_fw_objs);

	return uof_get_name(accel_dev, obj_num, adf_4xxx_fw_objs, num_fw_objs);
}

static const char *uof_get_name_402xx(struct adf_accel_dev *accel_dev, u32 obj_num)
{
	int num_fw_objs = ARRAY_SIZE(adf_402xx_fw_objs);

	return uof_get_name(accel_dev, obj_num, adf_402xx_fw_objs, num_fw_objs);
}

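/* Object type (ADF_FW_*_OBJ) of the firmware loaded at position obj_num */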
static int uof_get_obj_type(struct adf_accel_dev *accel_dev, u32 obj_num)
{
	const struct adf_fw_config *fw_config;

	if (obj_num >= uof_get_num_objs(accel_dev))
		return -EINVAL;

	fw_config = get_fw_config(accel_dev);
	if (!fw_config)
		return -EINVAL;

	return fw_config[obj_num].obj;
}

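/* Accel engine mask that the firmware object at position obj_num runs on */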
static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
{
	const struct adf_fw_config *fw_config;

	fw_config = get_fw_config(accel_dev);
	if (!fw_config)
		return 0;

	return fw_config[obj_num].ae_mask;
}

static void adf_gen4_set_err_mask(struct adf_dev_err_mask *dev_err_mask)
{
	dev_err_mask->cppagentcmdpar_mask = ADF_4XXX_HICPPAGENTCMDPARERRLOG_MASK;
	dev_err_mask->parerr_ath_cph_mask = ADF_4XXX_PARITYERRORMASK_ATH_CPH_MASK;
	dev_err_mask->parerr_cpr_xlt_mask = ADF_4XXX_PARITYERRORMASK_CPR_XLT_MASK;
	dev_err_mask->parerr_dcpr_ucs_mask = ADF_4XXX_PARITYERRORMASK_DCPR_UCS_MASK;
	dev_err_mask->parerr_pke_mask = ADF_4XXX_PARITYERRORMASK_PKE_MASK;
	dev_err_mask->ssmfeatren_mask = ADF_4XXX_SSMFEATREN_MASK;
}

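/*
 * Populate the hw_data callbacks and constants shared by the 4xxx family.
 * The PCI device id selects the firmware image names and the enabled-thread
 * mask helper (401xx uses its own asym thread mask).
 */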
void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id)
{
	hw_data->dev_class = &adf_4xxx_class;
	hw_data->instance_id = adf_4xxx_class.instances++;
	hw_data->num_banks = ADF_GEN4_ETR_MAX_BANKS;
	hw_data->num_banks_per_vf = ADF_GEN4_NUM_BANKS_PER_VF;
	hw_data->num_rings_per_bank = ADF_GEN4_NUM_RINGS_PER_BANK;
	hw_data->num_accel = ADF_GEN4_MAX_ACCELERATORS;
	hw_data->num_engines = ADF_4XXX_MAX_ACCELENGINES;
	hw_data->num_logical_accel = 1;
	hw_data->tx_rx_gap = ADF_GEN4_RX_RINGS_OFFSET;
	hw_data->tx_rings_mask = ADF_GEN4_TX_RINGS_MASK;
	hw_data->ring_to_svc_map = ADF_GEN4_DEFAULT_RING_TO_SRV_MAP;
	hw_data->alloc_irq = adf_isr_resource_alloc;
	hw_data->free_irq = adf_isr_resource_free;
	hw_data->enable_error_correction = adf_gen4_enable_error_correction;
	hw_data->get_accel_mask = adf_gen4_get_accel_mask;
	hw_data->get_ae_mask = get_ae_mask;
	hw_data->get_num_accels = adf_gen4_get_num_accels;
	hw_data->get_num_aes = adf_gen4_get_num_aes;
	hw_data->get_sram_bar_id = adf_gen4_get_sram_bar_id;
	hw_data->get_etr_bar_id = adf_gen4_get_etr_bar_id;
	hw_data->get_misc_bar_id = adf_gen4_get_misc_bar_id;
	hw_data->get_arb_info = adf_gen4_get_arb_info;
	hw_data->get_admin_info = adf_gen4_get_admin_info;
	hw_data->get_accel_cap = get_accel_cap;
	hw_data->get_sku = adf_gen4_get_sku;
	hw_data->init_admin_comms = adf_init_admin_comms;
	hw_data->exit_admin_comms = adf_exit_admin_comms;
	hw_data->send_admin_init = adf_send_admin_init;
	hw_data->init_arb = adf_init_arb;
	hw_data->exit_arb = adf_exit_arb;
	hw_data->get_arb_mapping = adf_get_arbiter_mapping;
	hw_data->enable_ints = adf_gen4_enable_ints;
	hw_data->init_device = adf_gen4_init_device;
	hw_data->reset_device = adf_reset_flr;
	hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK;
	hw_data->num_rps = ADF_GEN4_MAX_RPS;
	switch (dev_id) {
	case ADF_402XX_PCI_DEVICE_ID:
		hw_data->fw_name = ADF_402XX_FW;
		hw_data->fw_mmp_name = ADF_402XX_MMP;
		hw_data->uof_get_name = uof_get_name_402xx;
		hw_data->get_ena_thd_mask = get_ena_thd_mask;
		break;
	case ADF_401XX_PCI_DEVICE_ID:
		hw_data->fw_name = ADF_4XXX_FW;
		hw_data->fw_mmp_name = ADF_4XXX_MMP;
		hw_data->uof_get_name = uof_get_name_4xxx;
		hw_data->get_ena_thd_mask = get_ena_thd_mask_401xx;
		break;
	default:
		hw_data->fw_name = ADF_4XXX_FW;
		hw_data->fw_mmp_name = ADF_4XXX_MMP;
		hw_data->uof_get_name = uof_get_name_4xxx;
		hw_data->get_ena_thd_mask = get_ena_thd_mask;
		break;
	}
	hw_data->uof_get_num_objs = uof_get_num_objs;
	hw_data->uof_get_obj_type = uof_get_obj_type;
	hw_data->uof_get_ae_mask = uof_get_ae_mask;
	hw_data->get_rp_group = get_rp_group;
	hw_data->set_msix_rttable = adf_gen4_set_msix_default_rttable;
	hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
	hw_data->get_ring_to_svc_map = adf_gen4_get_ring_to_svc_map;
	hw_data->disable_iov = adf_disable_sriov;
	hw_data->ring_pair_reset = adf_gen4_ring_pair_reset;
	hw_data->bank_state_save = adf_gen4_bank_state_save;
	hw_data->bank_state_restore = adf_gen4_bank_state_restore;
	hw_data->enable_pm = adf_gen4_enable_pm;
	hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt;
	hw_data->dev_config = adf_gen4_dev_config;
	hw_data->start_timer = adf_gen4_timer_start;
	hw_data->stop_timer = adf_gen4_timer_stop;
	hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock;
	hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
	hw_data->clock_frequency = ADF_4XXX_AE_FREQ;

	adf_gen4_set_err_mask(&hw_data->dev_err_mask);
	adf_gen4_init_hw_csr_ops(&hw_data->csr_ops);
	adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops);
	adf_gen4_init_dc_ops(&hw_data->dc_ops);
	adf_gen4_init_ras_ops(&hw_data->ras_ops);
	adf_gen4_init_tl_data(&hw_data->tl_data);
	adf_gen4_init_vf_mig_ops(&hw_data->vfmig_ops);
	adf_init_rl_data(&hw_data->rl_data);
}

void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class->instances--;
}