/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef ADF_ACCEL_DEVICES_H_
#define ADF_ACCEL_DEVICES_H_
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/ratelimit.h>
#include <linux/types.h>
#include <linux/qat/qat_mig_dev.h>
#include <linux/wordpart.h>
#include "adf_cfg_common.h"
#include "adf_dc.h"
#include "adf_rl.h"
#include "adf_telemetry.h"
#include "adf_pfvf_msg.h"
#include "icp_qat_hw.h"

#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
#define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
#define ADF_C62X_DEVICE_NAME "c6xx"
#define ADF_C62XVF_DEVICE_NAME "c6xxvf"
#define ADF_C3XXX_DEVICE_NAME "c3xxx"
#define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf"
#define ADF_4XXX_DEVICE_NAME "4xxx"
#define ADF_420XX_DEVICE_NAME "420xx"
#define ADF_6XXX_DEVICE_NAME "6xxx"
#define PCI_DEVICE_ID_INTEL_QAT_4XXX 0x4940
#define PCI_DEVICE_ID_INTEL_QAT_4XXXIOV 0x4941
#define PCI_DEVICE_ID_INTEL_QAT_401XX 0x4942
#define PCI_DEVICE_ID_INTEL_QAT_401XXIOV 0x4943
#define PCI_DEVICE_ID_INTEL_QAT_402XX 0x4944
#define PCI_DEVICE_ID_INTEL_QAT_402XXIOV 0x4945
#define PCI_DEVICE_ID_INTEL_QAT_420XX 0x4946
#define PCI_DEVICE_ID_INTEL_QAT_420XXIOV 0x4947
#define PCI_DEVICE_ID_INTEL_QAT_6XXX 0x4948
#define PCI_DEVICE_ID_INTEL_QAT_6XXX_IOV 0x4949

#define ADF_DEVICE_FUSECTL_OFFSET 0x40
#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
#define ADF_DEVICE_FUSECTL_MASK 0x80000000
#define ADF_PCI_MAX_BARS 3
#define ADF_DEVICE_NAME_LENGTH 32
#define ADF_ETR_MAX_RINGS_PER_BANK 16
#define ADF_MAX_MSIX_VECTOR_NAME 48
#define ADF_DEVICE_NAME_PREFIX "qat_"

enum adf_accel_capabilities {
	ADF_ACCEL_CAPABILITIES_NULL = 0,
	ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = 1,
	ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = 2,
	ADF_ACCEL_CAPABILITIES_CIPHER = 4,
	ADF_ACCEL_CAPABILITIES_AUTHENTICATION = 8,
	ADF_ACCEL_CAPABILITIES_COMPRESSION = 32,
	ADF_ACCEL_CAPABILITIES_LZS_COMPRESSION = 64,
	ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128
};
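/*
 * The capability values above are bit flags (the value 16 is skipped) that
 * get OR-ed together into adf_hw_device_data::accel_capabilities_mask.
 * Minimal sketch of testing one flag; hw_data is assumed to point at an
 * initialised struct adf_hw_device_data (defined further down):
 *
 *	if (hw_data->accel_capabilities_mask &
 *	    ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC)
 *		pr_debug("symmetric crypto supported\n");
 */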

enum adf_fuses {
	ADF_FUSECTL0,
	ADF_FUSECTL1,
	ADF_FUSECTL2,
	ADF_FUSECTL3,
	ADF_FUSECTL4,
	ADF_FUSECTL5,
	ADF_MAX_FUSES
};

struct adf_bar {
	resource_size_t base_addr;
	void __iomem *virt_addr;
	resource_size_t size;
};

struct adf_irq {
	bool enabled;
	char name[ADF_MAX_MSIX_VECTOR_NAME];
};

struct adf_accel_msix {
	struct adf_irq *irqs;
	u32 num_entries;
};

struct adf_accel_pci {
	struct pci_dev *pci_dev;
	struct adf_accel_msix msix_entries;
	struct adf_bar pci_bars[ADF_PCI_MAX_BARS];
	u8 revid;
	u8 sku;
};
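/*
 * Illustrative sketch (not driver code) of reading a fuse register through
 * one of the mapped BARs; the BAR index normally comes from the
 * device-specific get_misc_bar_id() callback and 0 is only a placeholder.
 * ADF_CSR_RD() is defined further down in this header.
 *
 *	struct adf_bar *misc_bar = &accel_dev->accel_pci_dev.pci_bars[0];
 *	u32 fusectl = ADF_CSR_RD(misc_bar->virt_addr,
 *				 ADF_DEVICE_FUSECTL_OFFSET);
 */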

enum dev_state {
	DEV_DOWN = 0,
	DEV_UP
};

enum dev_sku_info {
	DEV_SKU_1 = 0,
	DEV_SKU_2,
	DEV_SKU_3,
	DEV_SKU_4,
	DEV_SKU_VF,
	DEV_SKU_UNKNOWN,
};

enum ras_errors {
	ADF_RAS_CORR,
	ADF_RAS_UNCORR,
	ADF_RAS_FATAL,
	ADF_RAS_ERRORS,
};

struct adf_error_counters {
	atomic_t counter[ADF_RAS_ERRORS];
	bool sysfs_added;
	bool enabled;
};
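/*
 * The RAS counters are updated from interrupt context, hence the atomic
 * type. Minimal sketch of bumping the correctable-error count (assumes the
 * caller has already checked ras_errors.enabled):
 *
 *	atomic_inc(&accel_dev->ras_errors.counter[ADF_RAS_CORR]);
 */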

static inline const char *get_sku_info(enum dev_sku_info info)
{
	switch (info) {
	case DEV_SKU_1:
		return "SKU1";
	case DEV_SKU_2:
		return "SKU2";
	case DEV_SKU_3:
		return "SKU3";
	case DEV_SKU_4:
		return "SKU4";
	case DEV_SKU_VF:
		return "SKUVF";
	case DEV_SKU_UNKNOWN:
	default:
		break;
	}
	return "Unknown SKU";
}
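/*
 * Typical use pairs get_sku_info() with the per-device get_sku() callback,
 * e.g. when logging device details. Sketch only; GET_DEV() is defined
 * further down in this header:
 *
 *	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
 *
 *	dev_info(&GET_DEV(accel_dev), "SKU: %s\n",
 *		 get_sku_info(hw_data->get_sku(hw_data)));
 */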

struct adf_hw_device_class {
	const char *name;
	const enum adf_device_type type;
	u32 instances;
};

struct arb_info {
	u32 arb_cfg;
	u32 arb_offset;
	u32 wt2sam_offset;
};

struct admin_info {
	u32 admin_msg_ur;
	u32 admin_msg_lr;
	u32 mailbox_offset;
};

struct ring_config {
	u64 base;
	u32 config;
	u32 head;
	u32 tail;
	u32 reserved0;
};

struct bank_state {
	u32 ringstat0;
	u32 ringstat1;
	u32 ringuostat;
	u32 ringestat;
	u32 ringnestat;
	u32 ringnfstat;
	u32 ringfstat;
	u32 ringcstat0;
	u32 ringcstat1;
	u32 ringcstat2;
	u32 ringcstat3;
	u32 iaintflagen;
	u32 iaintflagreg;
	u32 iaintflagsrcsel0;
	u32 iaintflagsrcsel1;
	u32 iaintcolen;
	u32 iaintcolctl;
	u32 iaintflagandcolen;
	u32 ringexpstat;
	u32 ringexpintenable;
	u32 ringsrvarben;
	u32 reserved0;
	struct ring_config rings[ADF_ETR_MAX_RINGS_PER_BANK];
};
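/*
 * struct bank_state mirrors the per-bank ring CSRs above and is filled in
 * and consumed by the bank_state_save()/bank_state_restore() callbacks of
 * struct adf_hw_device_data (defined below), e.g. on live migration.
 * Hedged sketch; bank_nr is a placeholder:
 *
 *	struct bank_state state;
 *	int ret;
 *
 *	ret = hw_data->bank_state_save(accel_dev, bank_nr, &state);
 *	if (!ret)
 *		ret = hw_data->bank_state_restore(accel_dev, bank_nr, &state);
 */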

struct adf_hw_csr_ops {
	u64 (*build_csr_ring_base_addr)(dma_addr_t addr, u32 size);
	u32 (*read_csr_ring_head)(void __iomem *csr_base_addr, u32 bank,
				  u32 ring);
	void (*write_csr_ring_head)(void __iomem *csr_base_addr, u32 bank,
				    u32 ring, u32 value);
	u32 (*read_csr_ring_tail)(void __iomem *csr_base_addr, u32 bank,
				  u32 ring);
	void (*write_csr_ring_tail)(void __iomem *csr_base_addr, u32 bank,
				    u32 ring, u32 value);
	u32 (*read_csr_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_uo_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_e_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_ne_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_nf_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_f_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_c_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_exp_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_exp_int_en)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_exp_int_en)(void __iomem *csr_base_addr, u32 bank,
				     u32 value);
	u32 (*read_csr_ring_config)(void __iomem *csr_base_addr, u32 bank,
				    u32 ring);
	void (*write_csr_ring_config)(void __iomem *csr_base_addr, u32 bank,
				      u32 ring, u32 value);
	dma_addr_t (*read_csr_ring_base)(void __iomem *csr_base_addr, u32 bank,
					 u32 ring);
	void (*write_csr_ring_base)(void __iomem *csr_base_addr, u32 bank,
				    u32 ring, dma_addr_t addr);
	u32 (*read_csr_int_en)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_int_en)(void __iomem *csr_base_addr, u32 bank,
				 u32 value);
	u32 (*read_csr_int_flag)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_int_flag)(void __iomem *csr_base_addr, u32 bank,
				   u32 value);
	u32 (*read_csr_int_srcsel)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_int_srcsel)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_int_srcsel_w_val)(void __iomem *csr_base_addr,
					   u32 bank, u32 value);
	u32 (*read_csr_int_col_en)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_int_col_en)(void __iomem *csr_base_addr, u32 bank,
				     u32 value);
	u32 (*read_csr_int_col_ctl)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_int_col_ctl)(void __iomem *csr_base_addr, u32 bank,
				      u32 value);
	u32 (*read_csr_int_flag_and_col)(void __iomem *csr_base_addr,
					 u32 bank);
	void (*write_csr_int_flag_and_col)(void __iomem *csr_base_addr,
					   u32 bank, u32 value);
	u32 (*read_csr_ring_srv_arb_en)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_ring_srv_arb_en)(void __iomem *csr_base_addr, u32 bank,
					  u32 value);
	u32 (*get_int_col_ctl_enable_mask)(void);
};
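/*
 * All ring CSR accesses go through this ops table so that device
 * generations with different register layouts can share the transport
 * code. Illustrative use only; bank/ring numbers are placeholders and
 * GET_CSR_OPS() is defined further down:
 *
 *	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
 *	u32 head = csr_ops->read_csr_ring_head(csr_base, bank, ring);
 *
 *	csr_ops->write_csr_ring_head(csr_base, bank, ring, head);
 */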

struct adf_cfg_device_data;
struct adf_accel_dev;
struct adf_etr_data;
struct adf_etr_ring_data;

struct adf_ras_ops {
	void (*enable_ras_errors)(struct adf_accel_dev *accel_dev);
	void (*disable_ras_errors)(struct adf_accel_dev *accel_dev);
	bool (*handle_interrupt)(struct adf_accel_dev *accel_dev,
				 bool *reset_required);
};

struct adf_pfvf_ops {
	int (*enable_comms)(struct adf_accel_dev *accel_dev);
	u32 (*get_pf2vf_offset)(u32 i);
	u32 (*get_vf2pf_offset)(u32 i);
	void (*enable_vf2pf_interrupts)(void __iomem *pmisc_addr, u32 vf_mask);
	void (*disable_all_vf2pf_interrupts)(void __iomem *pmisc_addr);
	u32 (*disable_pending_vf2pf_interrupts)(void __iomem *pmisc_addr);
	int (*send_msg)(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
			u32 pfvf_offset, struct mutex *csr_lock);
	struct pfvf_message (*recv_msg)(struct adf_accel_dev *accel_dev,
					u32 pfvf_offset, u8 compat_ver);
};
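/*
 * PF<->VF messaging sketch (illustrative only; offset lookup and locking
 * are handled by the common PFVF protocol code, not open-coded like this).
 * GET_PFVF_OPS() is defined further down:
 *
 *	struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);
 *	u32 offset = pfvf_ops->get_pf2vf_offset(vf_nr);
 *	int ret = pfvf_ops->send_msg(accel_dev, msg, offset, csr_lock);
 */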

struct adf_dc_ops {
	int (*build_comp_block)(void *ctx, enum adf_dc_algo algo);
	int (*build_decomp_block)(void *ctx, enum adf_dc_algo algo);
};

struct qat_migdev_ops {
	int (*init)(struct qat_mig_dev *mdev);
	void (*cleanup)(struct qat_mig_dev *mdev);
	void (*reset)(struct qat_mig_dev *mdev);
	int (*open)(struct qat_mig_dev *mdev);
	void (*close)(struct qat_mig_dev *mdev);
	int (*suspend)(struct qat_mig_dev *mdev);
	int (*resume)(struct qat_mig_dev *mdev);
	int (*save_state)(struct qat_mig_dev *mdev);
	int (*save_setup)(struct qat_mig_dev *mdev);
	int (*load_state)(struct qat_mig_dev *mdev);
	int (*load_setup)(struct qat_mig_dev *mdev, int size);
};
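/*
 * Rough life cycle of the migration callbacks, as a hedged summary rather
 * than a contract defined by this header: init() and open() set the
 * migration device up, suspend() plus save_setup()/save_state() capture it
 * on the source, load_setup()/load_state() plus resume() replay it on the
 * destination, and close()/cleanup() tear it down, e.g.:
 *
 *	int ret = ops->init(mdev);
 *
 *	if (!ret)
 *		ret = ops->open(mdev);
 */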

struct adf_dev_err_mask {
	u32 cppagentcmdpar_mask;
	u32 parerr_ath_cph_mask;
	u32 parerr_cpr_xlt_mask;
	u32 parerr_dcpr_ucs_mask;
	u32 parerr_pke_mask;
	u32 parerr_wat_wcp_mask;
	u32 ssmfeatren_mask;
};

struct adf_hw_device_data {
	struct adf_hw_device_class *dev_class;
	u32 (*get_accel_mask)(struct adf_hw_device_data *self);
	u32 (*get_ae_mask)(struct adf_hw_device_data *self);
	u32 (*get_accel_cap)(struct adf_accel_dev *accel_dev);
	u32 (*get_sram_bar_id)(struct adf_hw_device_data *self);
	u32 (*get_misc_bar_id)(struct adf_hw_device_data *self);
	u32 (*get_etr_bar_id)(struct adf_hw_device_data *self);
	u32 (*get_num_aes)(struct adf_hw_device_data *self);
	u32 (*get_num_accels)(struct adf_hw_device_data *self);
	void (*get_arb_info)(struct arb_info *arb_csrs_info);
	void (*get_admin_info)(struct admin_info *admin_csrs_info);
	enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
	u16 (*get_ring_to_svc_map)(struct adf_accel_dev *accel_dev);
	int (*alloc_irq)(struct adf_accel_dev *accel_dev);
	void (*free_irq)(struct adf_accel_dev *accel_dev);
	void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
	int (*init_admin_comms)(struct adf_accel_dev *accel_dev);
	void (*exit_admin_comms)(struct adf_accel_dev *accel_dev);
	int (*send_admin_init)(struct adf_accel_dev *accel_dev);
	int (*start_timer)(struct adf_accel_dev *accel_dev);
	void (*stop_timer)(struct adf_accel_dev *accel_dev);
	void (*check_hb_ctrs)(struct adf_accel_dev *accel_dev);
	u32 (*get_hb_clock)(struct adf_hw_device_data *self);
	int (*measure_clock)(struct adf_accel_dev *accel_dev);
	int (*init_arb)(struct adf_accel_dev *accel_dev);
	void (*exit_arb)(struct adf_accel_dev *accel_dev);
	const u32 *(*get_arb_mapping)(struct adf_accel_dev *accel_dev);
	int (*init_device)(struct adf_accel_dev *accel_dev);
	int (*enable_pm)(struct adf_accel_dev *accel_dev);
	bool (*handle_pm_interrupt)(struct adf_accel_dev *accel_dev);
	void (*disable_iov)(struct adf_accel_dev *accel_dev);
	void (*configure_iov_threads)(struct adf_accel_dev *accel_dev,
				      bool enable);
	void (*enable_ints)(struct adf_accel_dev *accel_dev);
	void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
	int (*ring_pair_reset)(struct adf_accel_dev *accel_dev, u32 bank_nr);
	int (*bank_state_save)(struct adf_accel_dev *accel_dev, u32 bank_number,
			       struct bank_state *state);
	int (*bank_state_restore)(struct adf_accel_dev *accel_dev,
				  u32 bank_number, struct bank_state *state);
	void (*reset_device)(struct adf_accel_dev *accel_dev);
	void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
	const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num);
	u32 (*uof_get_num_objs)(struct adf_accel_dev *accel_dev);
	int (*uof_get_obj_type)(struct adf_accel_dev *accel_dev, u32 obj_num);
	u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
	int (*get_rp_group)(struct adf_accel_dev *accel_dev, u32 ae_mask);
	u32 (*get_ena_thd_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
	int (*dev_config)(struct adf_accel_dev *accel_dev);
	bool (*services_supported)(unsigned long mask);
	struct adf_pfvf_ops pfvf_ops;
	struct adf_hw_csr_ops csr_ops;
	struct adf_dc_ops dc_ops;
	struct adf_ras_ops ras_ops;
	struct adf_dev_err_mask dev_err_mask;
	struct adf_rl_hw_data rl_data;
	struct adf_tl_hw_data tl_data;
	struct qat_migdev_ops vfmig_ops;
	const char *fw_name;
	const char *fw_mmp_name;
	u32 fuses[ADF_MAX_FUSES];
	u32 straps;
	u32 accel_capabilities_mask;
	u32 extended_dc_capabilities;
	u16 fw_capabilities;
	u32 clock_frequency;
	u32 instance_id;
	u32 accel_mask;
	u32 ae_mask;
	u32 admin_ae_mask;
	u16 tx_rings_mask;
	u16 ring_to_svc_map;
	u32 thd_to_arb_map[ICP_QAT_HW_AE_DELIMITER];
	u8 tx_rx_gap;
	u8 num_banks;
	u16 num_banks_per_vf;
	u8 num_rings_per_bank;
	u8 num_accel;
	u8 num_logical_accel;
	u8 num_engines;
	u32 num_hb_ctrs;
	u8 num_rps;
};
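/*
 * Each device generation fills in one struct adf_hw_device_data at probe
 * time and the common code drives the hardware exclusively through it.
 * Hedged sketch of how a caller consults it (optional callbacks must be
 * NULL-checked):
 *
 *	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
 *	u32 num_aes = hw_data->get_num_aes(hw_data);
 *	int ret = 0;
 *
 *	if (hw_data->init_admin_comms)
 *		ret = hw_data->init_admin_comms(accel_dev);
 */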

/* CSR write macro */
#define ADF_CSR_WR(csr_base, csr_offset, val) \
	__raw_writel(val, csr_base + csr_offset)
/*
 * CSR write macro to handle cases where the high and low
 * offsets are sparsely located.
 */
#define ADF_CSR_WR64_LO_HI(csr_base, csr_low_offset, csr_high_offset, val)	\
do {										\
	ADF_CSR_WR(csr_base, csr_low_offset, lower_32_bits(val));		\
	ADF_CSR_WR(csr_base, csr_high_offset, upper_32_bits(val));		\
} while (0)

/* CSR read macro */
#define ADF_CSR_RD(csr_base, csr_offset) __raw_readl(csr_base + csr_offset)
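/*
 * ADF_CSR_WR64_LO_HI() is for 64-bit values whose low and high halves live
 * at non-adjacent offsets; the do/while (0) wrapper keeps the two writes
 * usable as a single statement in if/else bodies. Sketch with placeholder
 * offsets:
 *
 *	ADF_CSR_WR64_LO_HI(csr_base, 0x100, 0x140, ring_base);
 */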

#define ADF_CFG_NUM_SERVICES	4
#define ADF_SRV_TYPE_BIT_LEN	3
#define ADF_SRV_TYPE_MASK	0x7
#define ADF_AE_ADMIN_THREAD	7
#define ADF_NUM_THREADS_PER_AE	8
#define ADF_NUM_PKE_STRAND	2
#define ADF_AE_STRAND0_THREAD	8
#define ADF_AE_STRAND1_THREAD	9

#define GET_DEV(accel_dev) ((accel_dev)->accel_pci_dev.pci_dev->dev)
#define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars)
#define GET_HW_DATA(accel_dev) (accel_dev->hw_device)
#define GET_MAX_BANKS(accel_dev) (GET_HW_DATA(accel_dev)->num_banks)
#define GET_NUM_RINGS_PER_BANK(accel_dev) \
	GET_HW_DATA(accel_dev)->num_rings_per_bank
#define GET_SRV_TYPE(accel_dev, idx) \
	(((GET_HW_DATA(accel_dev)->ring_to_svc_map) >> (ADF_SRV_TYPE_BIT_LEN * (idx))) \
	& ADF_SRV_TYPE_MASK)
#define GET_ERR_MASK(accel_dev) (&GET_HW_DATA(accel_dev)->dev_err_mask)
#define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines)
#define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_ops)
#define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops)
#define GET_DC_OPS(accel_dev) (&(accel_dev)->hw_device->dc_ops)
#define GET_VFMIG_OPS(accel_dev) (&(accel_dev)->hw_device->vfmig_ops)
#define GET_TL_DATA(accel_dev) GET_HW_DATA(accel_dev)->tl_data
#define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev
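/*
 * ring_to_svc_map packs one ADF_SRV_TYPE_BIT_LEN-bit service type per slot,
 * ADF_CFG_NUM_SERVICES slots in total, which GET_SRV_TYPE() extracts.
 * Decoding sketch:
 *
 *	int i;
 *
 *	for (i = 0; i < ADF_CFG_NUM_SERVICES; i++)
 *		pr_debug("slot %d -> service %u\n", i,
 *			 (u32)GET_SRV_TYPE(accel_dev, i));
 */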

struct adf_admin_comms;
struct icp_qat_fw_loader_handle;
struct adf_fw_loader_data {
	struct icp_qat_fw_loader_handle *fw_loader;
	const struct firmware *uof_fw;
	const struct firmware *mmp_fw;
};

struct adf_accel_vf_info {
	struct adf_accel_dev *accel_dev;
	struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */
	struct mutex pfvf_mig_lock; /* protects PFVF state for migration */
	struct ratelimit_state vf2pf_ratelimit;
	u32 vf_nr;
	bool init;
	bool restarting;
	u8 vf_compat_ver;
	/*
	 * Private area used for device migration.
	 * Memory allocation and free is managed by migration driver.
	 */
	void *mig_priv;
};

struct adf_dc_data {
	u8 *ovf_buff;
	size_t ovf_buff_sz;
	dma_addr_t ovf_buff_p;
};

struct adf_pm {
	struct dentry *debugfs_pm_status;
	bool present;
	int idle_irq_counters;
	int throttle_irq_counters;
	int fw_irq_counters;
	int host_ack_counter;
	int host_nack_counter;
	ssize_t (*print_pm_status)(struct adf_accel_dev *accel_dev,
				   char __user *buf, size_t count, loff_t *pos);
};

struct adf_sysfs {
	int ring_num;
	struct rw_semaphore lock; /* protects access to the fields in this struct */
};

struct adf_accel_dev {
	struct adf_etr_data *transport;
	struct adf_hw_device_data *hw_device;
	struct adf_cfg_device_data *cfg;
	struct adf_fw_loader_data *fw_loader;
	struct adf_admin_comms *admin;
	struct adf_telemetry *telemetry;
	struct adf_dc_data *dc_data;
	struct adf_pm power_management;
	struct list_head crypto_list;
	struct list_head compression_list;
	unsigned long status;
	atomic_t ref_count;
	struct dentry *debugfs_dir;
	struct dentry *fw_cntr_dbgfile;
	struct dentry *cnv_dbgfile;
	struct list_head list;
	struct module *owner;
	struct adf_accel_pci accel_pci_dev;
	struct adf_timer *timer;
	struct adf_heartbeat *heartbeat;
	struct adf_rl *rate_limiting;
	struct adf_sysfs sysfs;
	union {
		struct {
			/* protects VF2PF interrupts access */
			spinlock_t vf2pf_ints_lock;
			/* vf_info is non-zero when SR-IOV is init'ed */
			struct adf_accel_vf_info *vf_info;
		} pf;
		struct {
			bool irq_enabled;
			char irq_name[ADF_MAX_MSIX_VECTOR_NAME];
			struct tasklet_struct pf2vf_bh_tasklet;
			struct mutex vf2pf_lock; /* protect CSR access */
			struct completion msg_received;
			struct pfvf_message response; /* temp field holding pf2vf response */
			u8 pf_compat_ver;
		} vf;
	};
	struct adf_error_counters ras_errors;
	struct mutex state_lock; /* protect state of the device */
	bool is_vf;
	bool autoreset_on_error;
	u32 accel_id;
};
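/*
 * The anonymous pf/vf union above is discriminated by is_vf: PF-only state
 * lives in .pf, VF-only state in .vf, so callers check is_vf before
 * touching either side (sketch):
 *
 *	if (accel_dev->is_vf)
 *		complete(&accel_dev->vf.msg_received);
 */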
#endif