/* xref: /freebsd/sys/dev/qat/qat_hw/qat_200xx/adf_200xx_hw_data.c (revision 71625ec9ad2a9bc8c09784fbd23b759830e0ee5f) */
1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /* Copyright(c) 2007-2022 Intel Corporation */
3 #include <adf_accel_devices.h>
4 #include <adf_common_drv.h>
5 #include <adf_cfg.h>
6 #include <adf_pfvf_msg.h>
7 #include <adf_dev_err.h>
8 #include <adf_gen2_hw_data.h>
9 #include <adf_gen2_pfvf.h>
10 #include "adf_200xx_hw_data.h"
11 #include "icp_qat_hw.h"
12 #include "adf_heartbeat.h"
13 
14 /* Worker thread to service arbiter mappings */
15 static const u32 thrd_to_arb_map[ADF_200XX_MAX_ACCELENGINES] =
16     { 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA };
17 
18 enum { DEV_200XX_SKU_1 = 0, DEV_200XX_SKU_2 = 1, DEV_200XX_SKU_3 = 2 };
19 
20 static u32 thrd_to_arb_map_gen[ADF_200XX_MAX_ACCELENGINES] = { 0 };
21 
22 static struct adf_hw_device_class qat_200xx_class = {.name =
23 							 ADF_200XX_DEVICE_NAME,
24 						     .type = DEV_200XX,
25 						     .instances = 0 };
26 
27 static u32
get_accel_mask(struct adf_accel_dev * accel_dev)28 get_accel_mask(struct adf_accel_dev *accel_dev)
29 {
30 	device_t pdev = accel_dev->accel_pci_dev.pci_dev;
31 
32 	u32 fuse;
33 	u32 straps;
34 
35 	fuse = pci_read_config(pdev, ADF_DEVICE_FUSECTL_OFFSET, 4);
36 	straps = pci_read_config(pdev, ADF_200XX_SOFTSTRAP_CSR_OFFSET, 4);
37 
38 	return (~(fuse | straps)) >> ADF_200XX_ACCELERATORS_REG_OFFSET &
39 	    ADF_200XX_ACCELERATORS_MASK;
40 }
41 
42 static u32
get_ae_mask(struct adf_accel_dev * accel_dev)43 get_ae_mask(struct adf_accel_dev *accel_dev)
44 {
45 	device_t pdev = accel_dev->accel_pci_dev.pci_dev;
46 	u32 fuse;
47 	u32 me_straps;
48 	u32 me_disable;
49 	u32 ssms_disabled;
50 
51 	fuse = pci_read_config(pdev, ADF_DEVICE_FUSECTL_OFFSET, 4);
52 	me_straps = pci_read_config(pdev, ADF_200XX_SOFTSTRAP_CSR_OFFSET, 4);
53 
54 	/* If SSMs are disabled, then disable the corresponding MEs */
55 	ssms_disabled =
56 	    (~get_accel_mask(accel_dev)) & ADF_200XX_ACCELERATORS_MASK;
57 	me_disable = 0x3;
58 	while (ssms_disabled) {
59 		if (ssms_disabled & 1)
60 			me_straps |= me_disable;
61 		ssms_disabled >>= 1;
62 		me_disable <<= 2;
63 	}
64 
65 	return (~(fuse | me_straps)) & ADF_200XX_ACCELENGINES_MASK;
66 }
67 
68 static u32
get_num_accels(struct adf_hw_device_data * self)69 get_num_accels(struct adf_hw_device_data *self)
70 {
71 	u32 i, ctr = 0;
72 
73 	if (!self || !self->accel_mask)
74 		return 0;
75 
76 	for (i = 0; i < ADF_200XX_MAX_ACCELERATORS; i++) {
77 		if (self->accel_mask & (1 << i))
78 			ctr++;
79 	}
80 	return ctr;
81 }
82 
83 static u32
get_num_aes(struct adf_hw_device_data * self)84 get_num_aes(struct adf_hw_device_data *self)
85 {
86 	u32 i, ctr = 0;
87 
88 	if (!self || !self->ae_mask)
89 		return 0;
90 
91 	for (i = 0; i < ADF_200XX_MAX_ACCELENGINES; i++) {
92 		if (self->ae_mask & (1 << i))
93 			ctr++;
94 	}
95 	return ctr;
96 }
97 
/* Return the BAR index holding the PMISC (miscellaneous control) registers. */
static u32
get_misc_bar_id(struct adf_hw_device_data *self)
{
	return ADF_200XX_PMISC_BAR;
}
103 
/* Return the BAR index holding the ETR (transport ring) registers. */
static u32
get_etr_bar_id(struct adf_hw_device_data *self)
{
	return ADF_200XX_ETR_BAR;
}
109 
/* 200xx has no dedicated SRAM BAR; 0 is returned as a placeholder. */
static u32
get_sram_bar_id(struct adf_hw_device_data *self)
{
	return 0;
}
115 
116 static enum dev_sku_info
get_sku(struct adf_hw_device_data * self)117 get_sku(struct adf_hw_device_data *self)
118 {
119 	int aes = get_num_aes(self);
120 
121 	if (aes == 6)
122 		return DEV_SKU_4;
123 
124 	return DEV_SKU_UNKNOWN;
125 }
126 
127 static void
adf_get_arbiter_mapping(struct adf_accel_dev * accel_dev,u32 const ** arb_map_config)128 adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
129 			u32 const **arb_map_config)
130 {
131 	int i;
132 	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
133 
134 	for (i = 0; i < ADF_200XX_MAX_ACCELENGINES; i++) {
135 		thrd_to_arb_map_gen[i] = 0;
136 		if (hw_device->ae_mask & (1 << i))
137 			thrd_to_arb_map_gen[i] = thrd_to_arb_map[i];
138 	}
139 	adf_cfg_gen_dispatch_arbiter(accel_dev,
140 				     thrd_to_arb_map,
141 				     thrd_to_arb_map_gen,
142 				     ADF_200XX_MAX_ACCELENGINES);
143 	*arb_map_config = thrd_to_arb_map_gen;
144 }
145 
/* Fill in the arbiter CSR offsets for this device generation. */
static void
get_arb_info(struct arb_info *arb_csrs_info)
{
	arb_csrs_info->arbiter_offset = ADF_200XX_ARB_OFFSET;
	arb_csrs_info->wrk_thd_2_srv_arb_map =
	    ADF_200XX_ARB_WRK_2_SER_MAP_OFFSET;
	arb_csrs_info->wrk_cfg_offset = ADF_200XX_ARB_WQCFG_OFFSET;
}
154 
/* Fill in the admin mailbox/message CSR offsets for this device generation. */
static void
get_admin_info(struct admin_info *admin_csrs_info)
{
	admin_csrs_info->mailbox_offset = ADF_200XX_MAILBOX_BASE_OFFSET;
	admin_csrs_info->admin_msg_ur = ADF_200XX_ADMINMSGUR_OFFSET;
	admin_csrs_info->admin_msg_lr = ADF_200XX_ADMINMSGLR_OFFSET;
}
162 
/* Report the ERRSOU3/ERRSOU5 error-source register offsets. */
static void
get_errsou_offset(u32 *errsou3, u32 *errsou5)
{
	*errsou3 = ADF_200XX_ERRSOU3;
	*errsou5 = ADF_200XX_ERRSOU5;
}
169 
/* Return the CPP bus clock derived from the AE clock frequency. */
static u32
get_clock_speed(struct adf_hw_device_data *self)
{
	/* CPP clock is half high-speed clock */
	return self->clock_frequency / 2;
}
176 
/*
 * Unmask correctable-error interrupts and enable CPP/CFC error detection.
 * Write order follows the hardware programming sequence and is preserved.
 */
static void
adf_enable_error_interrupts(struct resource *csr)
{
	ADF_CSR_WR(csr, ADF_ERRMSK0, ADF_200XX_ERRMSK0_CERR); /* ME0-ME3 */
	ADF_CSR_WR(csr, ADF_ERRMSK1, ADF_200XX_ERRMSK1_CERR); /* ME4-ME5 */
	ADF_CSR_WR(csr, ADF_ERRMSK5, ADF_200XX_ERRMSK5_CERR); /* SSM2 */

	/* Reset everything except VFtoPF1_16. */
	adf_csr_fetch_and_and(csr, ADF_ERRMSK3, ADF_200XX_VF2PF1_16);

	/* RI CPP bus interface error detection and reporting. */
	ADF_CSR_WR(csr, ADF_200XX_RICPPINTCTL, ADF_200XX_RICPP_EN);

	/* TI CPP bus interface error detection and reporting. */
	ADF_CSR_WR(csr, ADF_200XX_TICPPINTCTL, ADF_200XX_TICPP_EN);

	/* Enable CFC Error interrupts and logging. */
	ADF_CSR_WR(csr, ADF_200XX_CPP_CFC_ERR_CTRL, ADF_200XX_CPP_CFC_UE);
}
196 
/*
 * Mask both uncorrectable and correctable error interrupts for all
 * error sources (used on device shutdown/reset paths).
 */
static void
adf_disable_error_interrupts(struct adf_accel_dev *accel_dev)
{
	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_200XX_PMISC_BAR];
	struct resource *csr = misc_bar->virt_addr;

	/* ME0-ME3 */
	ADF_CSR_WR(csr,
		   ADF_ERRMSK0,
		   ADF_200XX_ERRMSK0_UERR | ADF_200XX_ERRMSK0_CERR);
	/* ME4-ME5 */
	ADF_CSR_WR(csr,
		   ADF_ERRMSK1,
		   ADF_200XX_ERRMSK1_UERR | ADF_200XX_ERRMSK1_CERR);
	/* CPP Push Pull, RI, TI, SSM0-SSM1, CFC */
	ADF_CSR_WR(csr, ADF_ERRMSK3, ADF_200XX_ERRMSK3_UERR);
	/* SSM2 */
	ADF_CSR_WR(csr, ADF_ERRMSK5, ADF_200XX_ERRMSK5_UERR);
}
216 
217 static int
adf_check_uncorrectable_error(struct adf_accel_dev * accel_dev)218 adf_check_uncorrectable_error(struct adf_accel_dev *accel_dev)
219 {
220 	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_200XX_PMISC_BAR];
221 	struct resource *csr = misc_bar->virt_addr;
222 
223 	u32 errsou0 = ADF_CSR_RD(csr, ADF_ERRSOU0) & ADF_200XX_ERRMSK0_UERR;
224 	u32 errsou1 = ADF_CSR_RD(csr, ADF_ERRSOU1) & ADF_200XX_ERRMSK1_UERR;
225 	u32 errsou3 = ADF_CSR_RD(csr, ADF_ERRSOU3) & ADF_200XX_ERRMSK3_UERR;
226 	u32 errsou5 = ADF_CSR_RD(csr, ADF_ERRSOU5) & ADF_200XX_ERRMSK5_UERR;
227 
228 	return (errsou0 | errsou1 | errsou3 | errsou5);
229 }
230 
/*
 * Configure MMP (Modular Math Processor) error reporting/correction per
 * accelerator.  Each SSM's MMPs are powered up, configured according to
 * whether the device supports PKE, then powered back down.  The CSR
 * sequence is order-dependent and left byte-identical.
 */
static void
adf_enable_mmp_error_correction(struct resource *csr,
				struct adf_hw_device_data *hw_data)
{
	unsigned int dev, mmp;
	unsigned int mask;

	/* Enable MMP Logging */
	for (dev = 0, mask = hw_data->accel_mask; mask; dev++, mask >>= 1) {
		if (!(mask & 1))
			continue;
		/* Set power-up */
		adf_csr_fetch_and_and(csr,
				      ADF_200XX_SLICEPWRDOWN(dev),
				      ~ADF_200XX_MMP_PWR_UP_MSK);

		if (hw_data->accel_capabilities_mask &
		    ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC) {
			for (mmp = 0; mmp < ADF_MAX_MMP; ++mmp) {
				/*
				 * The device supports PKE,
				 * so enable error reporting from MMP memory
				 */
				adf_csr_fetch_and_or(csr,
						     ADF_UERRSSMMMP(dev, mmp),
						     ADF_200XX_UERRSSMMMP_EN);
				/*
				 * The device supports PKE,
				 * so enable error correction from MMP memory
				 */
				adf_csr_fetch_and_or(csr,
						     ADF_CERRSSMMMP(dev, mmp),
						     ADF_200XX_CERRSSMMMP_EN);
			}
		} else {
			for (mmp = 0; mmp < ADF_MAX_MMP; ++mmp) {
				/*
				 * The device doesn't support PKE,
				 * so disable error reporting from MMP memory
				 */
				adf_csr_fetch_and_and(csr,
						      ADF_UERRSSMMMP(dev, mmp),
						      ~ADF_200XX_UERRSSMMMP_EN);
				/*
				 * The device doesn't support PKE,
				 * so disable error correction from MMP memory
				 */
				adf_csr_fetch_and_and(csr,
						      ADF_CERRSSMMMP(dev, mmp),
						      ~ADF_200XX_CERRSSMMMP_EN);
			}
		}

		/* Restore power-down value */
		adf_csr_fetch_and_or(csr,
				     ADF_200XX_SLICEPWRDOWN(dev),
				     ADF_200XX_MMP_PWR_UP_MSK);

		/* Disabling correctable error interrupts. */
		ADF_CSR_WR(csr,
			   ADF_200XX_INTMASKSSM(dev),
			   ADF_200XX_INTMASKSSM_UERR);
	}
}
295 
/*
 * Enable ECC detection/correction on the accel engines and shared memory,
 * then unmask error interrupts and configure MMP error correction.
 */
static void
adf_enable_error_correction(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_200XX_PMISC_BAR];
	struct resource *csr = misc_bar->virt_addr;
	unsigned int val, i;
	unsigned int mask;

	/* Enable Accel Engine error detection & correction */
	mask = hw_device->ae_mask;
	for (i = 0; mask; i++, mask >>= 1) {
		if (!(mask & 1))
			continue;
		/* Read-modify-write: set the ECC bits, preserve the rest. */
		val = ADF_CSR_RD(csr, ADF_200XX_AE_CTX_ENABLES(i));
		val |= ADF_200XX_ENABLE_AE_ECC_ERR;
		ADF_CSR_WR(csr, ADF_200XX_AE_CTX_ENABLES(i), val);
		val = ADF_CSR_RD(csr, ADF_200XX_AE_MISC_CONTROL(i));
		val |= ADF_200XX_ENABLE_AE_ECC_PARITY_CORR;
		ADF_CSR_WR(csr, ADF_200XX_AE_MISC_CONTROL(i), val);
	}

	/* Enable shared memory error detection & correction */
	mask = hw_device->accel_mask;
	for (i = 0; mask; i++, mask >>= 1) {
		if (!(mask & 1))
			continue;
		val = ADF_CSR_RD(csr, ADF_200XX_UERRSSMSH(i));
		val |= ADF_200XX_ERRSSMSH_EN;
		ADF_CSR_WR(csr, ADF_200XX_UERRSSMSH(i), val);
		val = ADF_CSR_RD(csr, ADF_200XX_CERRSSMSH(i));
		val |= ADF_200XX_ERRSSMSH_EN;
		ADF_CSR_WR(csr, ADF_200XX_CERRSSMSH(i), val);
		val = ADF_CSR_RD(csr, ADF_PPERR(i));
		val |= ADF_200XX_PPERR_EN;
		ADF_CSR_WR(csr, ADF_PPERR(i), val);
	}

	adf_enable_error_interrupts(csr);
	adf_enable_mmp_error_correction(csr, hw_device);
}
337 
338 static void
adf_enable_ints(struct adf_accel_dev * accel_dev)339 adf_enable_ints(struct adf_accel_dev *accel_dev)
340 {
341 	struct resource *addr;
342 
343 	addr = (&GET_BARS(accel_dev)[ADF_200XX_PMISC_BAR])->virt_addr;
344 
345 	/* Enable bundle and misc interrupts */
346 	ADF_CSR_WR(addr, ADF_200XX_SMIAPF0_MASK_OFFSET, ADF_200XX_SMIA0_MASK);
347 	ADF_CSR_WR(addr, ADF_200XX_SMIAPF1_MASK_OFFSET, ADF_200XX_SMIA1_MASK);
348 }
349 
/* Return the AE clock-update rate derived from the AE frequency. */
static u32
get_ae_clock(struct adf_hw_device_data *self)
{
	/*
	 * Clock update interval is <16> ticks for 200xx.
	 */
	return self->clock_frequency / 16;
}
358 
359 static int
get_storage_enabled(struct adf_accel_dev * accel_dev,uint32_t * storage_enabled)360 get_storage_enabled(struct adf_accel_dev *accel_dev, uint32_t *storage_enabled)
361 {
362 	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
363 	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
364 
365 	strlcpy(key, ADF_STORAGE_FIRMWARE_ENABLED, sizeof(key));
366 	if (!adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val)) {
367 		if (kstrtouint(val, 0, storage_enabled))
368 			return -EFAULT;
369 	}
370 	return 0;
371 }
372 
373 static int
measure_clock(struct adf_accel_dev * accel_dev)374 measure_clock(struct adf_accel_dev *accel_dev)
375 {
376 	u32 frequency;
377 	int ret = 0;
378 
379 	ret = adf_dev_measure_clock(accel_dev,
380 				    &frequency,
381 				    ADF_200XX_MIN_AE_FREQ,
382 				    ADF_200XX_MAX_AE_FREQ);
383 	if (ret)
384 		return ret;
385 
386 	accel_dev->hw_device->clock_frequency = frequency;
387 	return 0;
388 }
389 
390 static u32
adf_200xx_get_hw_cap(struct adf_accel_dev * accel_dev)391 adf_200xx_get_hw_cap(struct adf_accel_dev *accel_dev)
392 {
393 	device_t pdev = accel_dev->accel_pci_dev.pci_dev;
394 	u32 legfuses;
395 	u32 capabilities;
396 	u32 straps;
397 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
398 	u32 fuses = hw_data->fuses;
399 
400 	/* Read accelerator capabilities mask */
401 	legfuses = pci_read_config(pdev, ADF_DEVICE_LEGFUSE_OFFSET, 4);
402 	capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC +
403 	    ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC +
404 	    ICP_ACCEL_CAPABILITIES_CIPHER +
405 	    ICP_ACCEL_CAPABILITIES_AUTHENTICATION +
406 	    ICP_ACCEL_CAPABILITIES_COMPRESSION + ICP_ACCEL_CAPABILITIES_ZUC +
407 	    ICP_ACCEL_CAPABILITIES_SHA3 + ICP_ACCEL_CAPABILITIES_HKDF +
408 	    ICP_ACCEL_CAPABILITIES_ECEDMONT +
409 	    ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN;
410 	if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE)
411 		capabilities &= ~(ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
412 				  ICP_ACCEL_CAPABILITIES_CIPHER |
413 				  ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN);
414 	if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE)
415 		capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
416 	if (legfuses & ICP_ACCEL_MASK_PKE_SLICE)
417 		capabilities &= ~(ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
418 				  ICP_ACCEL_CAPABILITIES_ECEDMONT);
419 	if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE)
420 		capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
421 	if (legfuses & ICP_ACCEL_MASK_EIA3_SLICE)
422 		capabilities &= ~ICP_ACCEL_CAPABILITIES_ZUC;
423 	if (legfuses & ICP_ACCEL_MASK_SHA3_SLICE)
424 		capabilities &= ~ICP_ACCEL_CAPABILITIES_SHA3;
425 
426 	straps = pci_read_config(pdev, ADF_200XX_SOFTSTRAP_CSR_OFFSET, 4);
427 	if ((straps | fuses) & ADF_200XX_POWERGATE_PKE)
428 		capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
429 	if ((straps | fuses) & ADF_200XX_POWERGATE_CY)
430 		capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
431 
432 	return capabilities;
433 }
434 
/*
 * Return the firmware object name for the given service class.  A single
 * image is used for all services on this device.
 */
static const char *
get_obj_name(struct adf_accel_dev *accel_dev,
	     enum adf_accel_unit_services service)
{
	return ADF_CXXX_AE_FW_NAME_CUSTOM1;
}
441 
/* Number of firmware objects: this device loads a single image. */
static uint32_t
get_objs_num(struct adf_accel_dev *accel_dev)
{
	return 1;
}
447 
/*
 * AE mask for a firmware object: the single image runs on every enabled
 * accel engine regardless of the requested service class.
 */
static uint32_t
get_obj_cfg_ae_mask(struct adf_accel_dev *accel_dev,
		    enum adf_accel_unit_services services)
{
	return accel_dev->hw_device->ae_mask;
}
454 
455 void
adf_init_hw_data_200xx(struct adf_hw_device_data * hw_data)456 adf_init_hw_data_200xx(struct adf_hw_device_data *hw_data)
457 {
458 	hw_data->dev_class = &qat_200xx_class;
459 	hw_data->instance_id = qat_200xx_class.instances++;
460 	hw_data->num_banks = ADF_200XX_ETR_MAX_BANKS;
461 	hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
462 	hw_data->num_accel = ADF_200XX_MAX_ACCELERATORS;
463 	hw_data->num_logical_accel = 1;
464 	hw_data->num_engines = ADF_200XX_MAX_ACCELENGINES;
465 	hw_data->tx_rx_gap = ADF_200XX_RX_RINGS_OFFSET;
466 	hw_data->tx_rings_mask = ADF_200XX_TX_RINGS_MASK;
467 	hw_data->alloc_irq = adf_isr_resource_alloc;
468 	hw_data->free_irq = adf_isr_resource_free;
469 	hw_data->enable_error_correction = adf_enable_error_correction;
470 	hw_data->check_uncorrectable_error = adf_check_uncorrectable_error;
471 	hw_data->print_err_registers = adf_print_err_registers;
472 	hw_data->disable_error_interrupts = adf_disable_error_interrupts;
473 	hw_data->get_accel_mask = get_accel_mask;
474 	hw_data->get_ae_mask = get_ae_mask;
475 	hw_data->get_num_accels = get_num_accels;
476 	hw_data->get_num_aes = get_num_aes;
477 	hw_data->get_sram_bar_id = get_sram_bar_id;
478 	hw_data->get_etr_bar_id = get_etr_bar_id;
479 	hw_data->get_misc_bar_id = get_misc_bar_id;
480 	hw_data->get_arb_info = get_arb_info;
481 	hw_data->get_admin_info = get_admin_info;
482 	hw_data->get_errsou_offset = get_errsou_offset;
483 	hw_data->get_clock_speed = get_clock_speed;
484 	hw_data->get_sku = get_sku;
485 	hw_data->heartbeat_ctr_num = ADF_NUM_HB_CNT_PER_AE;
486 	hw_data->fw_name = ADF_200XX_FW;
487 	hw_data->fw_mmp_name = ADF_200XX_MMP;
488 	hw_data->init_admin_comms = adf_init_admin_comms;
489 	hw_data->exit_admin_comms = adf_exit_admin_comms;
490 	hw_data->disable_iov = adf_disable_sriov;
491 	hw_data->send_admin_init = adf_send_admin_init;
492 	hw_data->init_arb = adf_init_gen2_arb;
493 	hw_data->exit_arb = adf_exit_arb;
494 	hw_data->get_arb_mapping = adf_get_arbiter_mapping;
495 	hw_data->enable_ints = adf_enable_ints;
496 	hw_data->set_ssm_wdtimer = adf_set_ssm_wdtimer;
497 	hw_data->check_slice_hang = adf_check_slice_hang;
498 	hw_data->restore_device = adf_dev_restore;
499 	hw_data->reset_device = adf_reset_flr;
500 	hw_data->measure_clock = measure_clock;
501 	hw_data->get_ae_clock = get_ae_clock;
502 	hw_data->reset_device = adf_reset_flr;
503 	hw_data->get_objs_num = get_objs_num;
504 	hw_data->get_obj_name = get_obj_name;
505 	hw_data->get_obj_cfg_ae_mask = get_obj_cfg_ae_mask;
506 	hw_data->get_accel_cap = adf_200xx_get_hw_cap;
507 	hw_data->clock_frequency = ADF_200XX_AE_FREQ;
508 	hw_data->extended_dc_capabilities = 0;
509 	hw_data->get_storage_enabled = get_storage_enabled;
510 	hw_data->query_storage_cap = 1;
511 	hw_data->get_heartbeat_status = adf_get_heartbeat_status;
512 	hw_data->get_ae_clock = get_ae_clock;
513 	hw_data->storage_enable = 0;
514 	hw_data->get_ring_to_svc_map = adf_cfg_get_services_enabled;
515 	hw_data->config_device = adf_config_device;
516 	hw_data->set_asym_rings_mask = adf_cfg_set_asym_rings_mask;
517 	hw_data->ring_to_svc_map = ADF_DEFAULT_RING_TO_SRV_MAP;
518 	hw_data->pre_reset = adf_dev_pre_reset;
519 	hw_data->post_reset = adf_dev_post_reset;
520 
521 	adf_gen2_init_hw_csr_info(&hw_data->csr_info);
522 	adf_gen2_init_pf_pfvf_ops(&hw_data->csr_info.pfvf_ops);
523 }
524 
/* Release this device's slot in the shared class instance counter. */
void
adf_clean_hw_data_200xx(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class->instances--;
}
530