/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef ADF_ACCEL_DEVICES_H_
#define ADF_ACCEL_DEVICES_H_
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/ratelimit.h>
#include <linux/types.h>
#include <linux/qat/qat_mig_dev.h>
#include "adf_cfg_common.h"
#include "adf_rl.h"
#include "adf_telemetry.h"
#include "adf_pfvf_msg.h"
#include "icp_qat_hw.h"

#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
#define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
#define ADF_C62X_DEVICE_NAME "c6xx"
#define ADF_C62XVF_DEVICE_NAME "c6xxvf"
#define ADF_C3XXX_DEVICE_NAME "c3xxx"
#define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf"
#define ADF_4XXX_DEVICE_NAME "4xxx"
#define ADF_420XX_DEVICE_NAME "420xx"
#define ADF_4XXX_PCI_DEVICE_ID 0x4940
#define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941
#define ADF_401XX_PCI_DEVICE_ID 0x4942
#define ADF_401XXIOV_PCI_DEVICE_ID 0x4943
#define ADF_402XX_PCI_DEVICE_ID 0x4944
#define ADF_402XXIOV_PCI_DEVICE_ID 0x4945
#define ADF_420XX_PCI_DEVICE_ID 0x4946
#define ADF_420XXIOV_PCI_DEVICE_ID 0x4947
#define ADF_DEVICE_FUSECTL_OFFSET 0x40
#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
#define ADF_DEVICE_FUSECTL_MASK 0x80000000
#define ADF_PCI_MAX_BARS 3
#define ADF_DEVICE_NAME_LENGTH 32
#define ADF_ETR_MAX_RINGS_PER_BANK 16
#define ADF_MAX_MSIX_VECTOR_NAME 48
#define ADF_DEVICE_NAME_PREFIX "qat_"

enum adf_accel_capabilities {
	ADF_ACCEL_CAPABILITIES_NULL = 0,
	ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = 1,
	ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = 2,
	ADF_ACCEL_CAPABILITIES_CIPHER = 4,
	ADF_ACCEL_CAPABILITIES_AUTHENTICATION = 8,
	ADF_ACCEL_CAPABILITIES_COMPRESSION = 32,
	ADF_ACCEL_CAPABILITIES_LZS_COMPRESSION = 64,
	ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128
};
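
/*
 * The capability values above are bit flags that are OR-ed together into
 * accel_capabilities_mask. Minimal sketch, illustrative only and not part of
 * this header, of testing the mask on a populated accel_dev->hw_device:
 *
 *	u32 caps = accel_dev->hw_device->accel_capabilities_mask;
 *	bool has_sym = caps & ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
 *	bool has_dc = caps & ADF_ACCEL_CAPABILITIES_COMPRESSION;
 */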

struct adf_bar {
	resource_size_t base_addr;
	void __iomem *virt_addr;
	resource_size_t size;
};

struct adf_irq {
	bool enabled;
	char name[ADF_MAX_MSIX_VECTOR_NAME];
};

struct adf_accel_msix {
	struct adf_irq *irqs;
	u32 num_entries;
};

struct adf_accel_pci {
	struct pci_dev *pci_dev;
	struct adf_accel_msix msix_entries;
	struct adf_bar pci_bars[ADF_PCI_MAX_BARS];
	u8 revid;
	u8 sku;
};

enum dev_state {
	DEV_DOWN = 0,
	DEV_UP
};

enum dev_sku_info {
	DEV_SKU_1 = 0,
	DEV_SKU_2,
	DEV_SKU_3,
	DEV_SKU_4,
	DEV_SKU_VF,
	DEV_SKU_UNKNOWN,
};

enum ras_errors {
	ADF_RAS_CORR,
	ADF_RAS_UNCORR,
	ADF_RAS_FATAL,
	ADF_RAS_ERRORS,
};

struct adf_error_counters {
	atomic_t counter[ADF_RAS_ERRORS];
	bool sysfs_added;
	bool enabled;
};

static inline const char *get_sku_info(enum dev_sku_info info)
{
	switch (info) {
	case DEV_SKU_1:
		return "SKU1";
	case DEV_SKU_2:
		return "SKU2";
	case DEV_SKU_3:
		return "SKU3";
	case DEV_SKU_4:
		return "SKU4";
	case DEV_SKU_VF:
		return "SKUVF";
	case DEV_SKU_UNKNOWN:
	default:
		break;
	}
	return "Unknown SKU";
}

struct adf_hw_device_class {
	const char *name;
	const enum adf_device_type type;
	u32 instances;
};

struct arb_info {
	u32 arb_cfg;
	u32 arb_offset;
	u32 wt2sam_offset;
};

struct admin_info {
	u32 admin_msg_ur;
	u32 admin_msg_lr;
	u32 mailbox_offset;
};

struct ring_config {
	u64 base;
	u32 config;
	u32 head;
	u32 tail;
	u32 reserved0;
};

struct bank_state {
	u32 ringstat0;
	u32 ringstat1;
	u32 ringuostat;
	u32 ringestat;
	u32 ringnestat;
	u32 ringnfstat;
	u32 ringfstat;
	u32 ringcstat0;
	u32 ringcstat1;
	u32 ringcstat2;
	u32 ringcstat3;
	u32 iaintflagen;
	u32 iaintflagreg;
	u32 iaintflagsrcsel0;
	u32 iaintflagsrcsel1;
	u32 iaintcolen;
	u32 iaintcolctl;
	u32 iaintflagandcolen;
	u32 ringexpstat;
	u32 ringexpintenable;
	u32 ringsrvarben;
	u32 reserved0;
	struct ring_config rings[ADF_ETR_MAX_RINGS_PER_BANK];
};

struct adf_hw_csr_ops {
	u64 (*build_csr_ring_base_addr)(dma_addr_t addr, u32 size);
	u32 (*read_csr_ring_head)(void __iomem *csr_base_addr, u32 bank,
				  u32 ring);
	void (*write_csr_ring_head)(void __iomem *csr_base_addr, u32 bank,
				    u32 ring, u32 value);
	u32 (*read_csr_ring_tail)(void __iomem *csr_base_addr, u32 bank,
				  u32 ring);
	void (*write_csr_ring_tail)(void __iomem *csr_base_addr, u32 bank,
				    u32 ring, u32 value);
	u32 (*read_csr_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_uo_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_e_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_ne_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_nf_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_f_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_c_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_exp_stat)(void __iomem *csr_base_addr, u32 bank);
	u32 (*read_csr_exp_int_en)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_exp_int_en)(void __iomem *csr_base_addr, u32 bank,
				     u32 value);
	u32 (*read_csr_ring_config)(void __iomem *csr_base_addr, u32 bank,
				    u32 ring);
	void (*write_csr_ring_config)(void __iomem *csr_base_addr, u32 bank,
				      u32 ring, u32 value);
	dma_addr_t (*read_csr_ring_base)(void __iomem *csr_base_addr, u32 bank,
					 u32 ring);
	void (*write_csr_ring_base)(void __iomem *csr_base_addr, u32 bank,
				    u32 ring, dma_addr_t addr);
	u32 (*read_csr_int_en)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_int_en)(void __iomem *csr_base_addr, u32 bank,
				 u32 value);
	u32 (*read_csr_int_flag)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_int_flag)(void __iomem *csr_base_addr, u32 bank,
				   u32 value);
	u32 (*read_csr_int_srcsel)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_int_srcsel)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_int_srcsel_w_val)(void __iomem *csr_base_addr,
					   u32 bank, u32 value);
	u32 (*read_csr_int_col_en)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_int_col_en)(void __iomem *csr_base_addr, u32 bank,
				     u32 value);
	u32 (*read_csr_int_col_ctl)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_int_col_ctl)(void __iomem *csr_base_addr, u32 bank,
				      u32 value);
	u32 (*read_csr_int_flag_and_col)(void __iomem *csr_base_addr,
					 u32 bank);
	void (*write_csr_int_flag_and_col)(void __iomem *csr_base_addr,
					   u32 bank, u32 value);
	u32 (*read_csr_ring_srv_arb_en)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_ring_srv_arb_en)(void __iomem *csr_base_addr, u32 bank,
					  u32 value);
	u32 (*get_int_col_ctl_enable_mask)(void);
};
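
/*
 * Illustrative sketch (not part of this header) of how transport code can go
 * through the per-generation CSR ops table instead of touching ring CSRs
 * directly; csr_base, bank, ring and tail are placeholders for values owned
 * by the caller:
 *
 *	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
 *	u32 empty_stat;
 *
 *	csr_ops->write_csr_ring_tail(csr_base, bank, ring, tail);
 *	empty_stat = csr_ops->read_csr_e_stat(csr_base, bank);
 */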

struct adf_cfg_device_data;
struct adf_accel_dev;
struct adf_etr_data;
struct adf_etr_ring_data;

struct adf_ras_ops {
	void (*enable_ras_errors)(struct adf_accel_dev *accel_dev);
	void (*disable_ras_errors)(struct adf_accel_dev *accel_dev);
	bool (*handle_interrupt)(struct adf_accel_dev *accel_dev,
				 bool *reset_required);
};

struct adf_pfvf_ops {
	int (*enable_comms)(struct adf_accel_dev *accel_dev);
	u32 (*get_pf2vf_offset)(u32 i);
	u32 (*get_vf2pf_offset)(u32 i);
	void (*enable_vf2pf_interrupts)(void __iomem *pmisc_addr, u32 vf_mask);
	void (*disable_all_vf2pf_interrupts)(void __iomem *pmisc_addr);
	u32 (*disable_pending_vf2pf_interrupts)(void __iomem *pmisc_addr);
	int (*send_msg)(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
			u32 pfvf_offset, struct mutex *csr_lock);
	struct pfvf_message (*recv_msg)(struct adf_accel_dev *accel_dev,
					u32 pfvf_offset, u8 compat_ver);
};
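
/*
 * Hedged sketch (not part of this header) of sending a PF-to-VF message
 * through this ops table; vf_nr, msg and csr_lock are simplified placeholders
 * for values owned by the PFVF protocol code:
 *
 *	struct adf_pfvf_ops *pfvf_ops = GET_PFVF_OPS(accel_dev);
 *	u32 pfvf_offset = pfvf_ops->get_pf2vf_offset(vf_nr);
 *	int ret = pfvf_ops->send_msg(accel_dev, msg, pfvf_offset, csr_lock);
 */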

struct adf_dc_ops {
	void (*build_deflate_ctx)(void *ctx);
};

struct qat_migdev_ops {
	int (*init)(struct qat_mig_dev *mdev);
	void (*cleanup)(struct qat_mig_dev *mdev);
	void (*reset)(struct qat_mig_dev *mdev);
	int (*open)(struct qat_mig_dev *mdev);
	void (*close)(struct qat_mig_dev *mdev);
	int (*suspend)(struct qat_mig_dev *mdev);
	int (*resume)(struct qat_mig_dev *mdev);
	int (*save_state)(struct qat_mig_dev *mdev);
	int (*save_setup)(struct qat_mig_dev *mdev);
	int (*load_state)(struct qat_mig_dev *mdev);
	int (*load_setup)(struct qat_mig_dev *mdev, int size);
};

struct adf_dev_err_mask {
	u32 cppagentcmdpar_mask;
	u32 parerr_ath_cph_mask;
	u32 parerr_cpr_xlt_mask;
	u32 parerr_dcpr_ucs_mask;
	u32 parerr_pke_mask;
	u32 parerr_wat_wcp_mask;
	u32 ssmfeatren_mask;
};

struct adf_hw_device_data {
	struct adf_hw_device_class *dev_class;
	u32 (*get_accel_mask)(struct adf_hw_device_data *self);
	u32 (*get_ae_mask)(struct adf_hw_device_data *self);
	u32 (*get_accel_cap)(struct adf_accel_dev *accel_dev);
	u32 (*get_sram_bar_id)(struct adf_hw_device_data *self);
	u32 (*get_misc_bar_id)(struct adf_hw_device_data *self);
	u32 (*get_etr_bar_id)(struct adf_hw_device_data *self);
	u32 (*get_num_aes)(struct adf_hw_device_data *self);
	u32 (*get_num_accels)(struct adf_hw_device_data *self);
	void (*get_arb_info)(struct arb_info *arb_csrs_info);
	void (*get_admin_info)(struct admin_info *admin_csrs_info);
	enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
	u16 (*get_ring_to_svc_map)(struct adf_accel_dev *accel_dev);
	int (*alloc_irq)(struct adf_accel_dev *accel_dev);
	void (*free_irq)(struct adf_accel_dev *accel_dev);
	void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
	int (*init_admin_comms)(struct adf_accel_dev *accel_dev);
	void (*exit_admin_comms)(struct adf_accel_dev *accel_dev);
	int (*send_admin_init)(struct adf_accel_dev *accel_dev);
	int (*start_timer)(struct adf_accel_dev *accel_dev);
	void (*stop_timer)(struct adf_accel_dev *accel_dev);
	void (*check_hb_ctrs)(struct adf_accel_dev *accel_dev);
	uint32_t (*get_hb_clock)(struct adf_hw_device_data *self);
	int (*measure_clock)(struct adf_accel_dev *accel_dev);
	int (*init_arb)(struct adf_accel_dev *accel_dev);
	void (*exit_arb)(struct adf_accel_dev *accel_dev);
	const u32 *(*get_arb_mapping)(struct adf_accel_dev *accel_dev);
	int (*init_device)(struct adf_accel_dev *accel_dev);
	int (*enable_pm)(struct adf_accel_dev *accel_dev);
	bool (*handle_pm_interrupt)(struct adf_accel_dev *accel_dev);
	void (*disable_iov)(struct adf_accel_dev *accel_dev);
	void (*configure_iov_threads)(struct adf_accel_dev *accel_dev,
				      bool enable);
	void (*enable_ints)(struct adf_accel_dev *accel_dev);
	void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
	int (*ring_pair_reset)(struct adf_accel_dev *accel_dev, u32 bank_nr);
	int (*bank_state_save)(struct adf_accel_dev *accel_dev, u32 bank_number,
			       struct bank_state *state);
	int (*bank_state_restore)(struct adf_accel_dev *accel_dev,
				  u32 bank_number, struct bank_state *state);
	void (*reset_device)(struct adf_accel_dev *accel_dev);
	void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
	const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num);
	u32 (*uof_get_num_objs)(struct adf_accel_dev *accel_dev);
	int (*uof_get_obj_type)(struct adf_accel_dev *accel_dev, u32 obj_num);
	u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
	int (*get_rp_group)(struct adf_accel_dev *accel_dev, u32 ae_mask);
	u32 (*get_ena_thd_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
	int (*dev_config)(struct adf_accel_dev *accel_dev);
	struct adf_pfvf_ops pfvf_ops;
	struct adf_hw_csr_ops csr_ops;
	struct adf_dc_ops dc_ops;
	struct adf_ras_ops ras_ops;
	struct adf_dev_err_mask dev_err_mask;
	struct adf_rl_hw_data rl_data;
	struct adf_tl_hw_data tl_data;
	struct qat_migdev_ops vfmig_ops;
	const char *fw_name;
	const char *fw_mmp_name;
	u32 fuses;
	u32 straps;
	u32 accel_capabilities_mask;
	u32 extended_dc_capabilities;
	u16 fw_capabilities;
	u32 clock_frequency;
	u32 instance_id;
	u16 accel_mask;
	u32 ae_mask;
	u32 admin_ae_mask;
	u16 tx_rings_mask;
	u16 ring_to_svc_map;
	u32 thd_to_arb_map[ICP_QAT_HW_AE_DELIMITER];
	u8 tx_rx_gap;
	u8 num_banks;
	u16 num_banks_per_vf;
	u8 num_rings_per_bank;
	u8 num_accel;
	u8 num_logical_accel;
	u8 num_engines;
	u32 num_hb_ctrs;
	u8 num_rps;
};
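
/*
 * Each device generation supplies its own adf_hw_device_data and fills in the
 * callbacks and limits above at init time. Hypothetical sketch only; the
 * example_* names below do not exist in the driver:
 *
 *	static void example_init_hw_data(struct adf_hw_device_data *hw_data)
 *	{
 *		hw_data->num_banks = EXAMPLE_NUM_BANKS;
 *		hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
 *		hw_data->get_misc_bar_id = example_get_misc_bar_id;
 *		hw_data->get_sku = example_get_sku;
 *	}
 */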

/* CSR write macro */
#define ADF_CSR_WR(csr_base, csr_offset, val) \
	__raw_writel(val, csr_base + csr_offset)

/* CSR read macro */
#define ADF_CSR_RD(csr_base, csr_offset) __raw_readl(csr_base + csr_offset)
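
/*
 * Illustrative use of the CSR accessors above; csr_base and offset stand in
 * for a mapped BAR address and a device-specific register offset:
 *
 *	u32 val = ADF_CSR_RD(csr_base, offset);
 *
 *	ADF_CSR_WR(csr_base, offset, val | BIT(0));
 */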

#define ADF_CFG_NUM_SERVICES	4
#define ADF_SRV_TYPE_BIT_LEN	3
#define ADF_SRV_TYPE_MASK	0x7
#define ADF_AE_ADMIN_THREAD	7
#define ADF_NUM_THREADS_PER_AE	8
#define ADF_NUM_PKE_STRAND	2
#define ADF_AE_STRAND0_THREAD	8
#define ADF_AE_STRAND1_THREAD	9

#define GET_DEV(accel_dev) ((accel_dev)->accel_pci_dev.pci_dev->dev)
#define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars)
#define GET_HW_DATA(accel_dev) (accel_dev->hw_device)
#define GET_MAX_BANKS(accel_dev) (GET_HW_DATA(accel_dev)->num_banks)
#define GET_NUM_RINGS_PER_BANK(accel_dev) \
	GET_HW_DATA(accel_dev)->num_rings_per_bank
#define GET_SRV_TYPE(accel_dev, idx) \
	(((GET_HW_DATA(accel_dev)->ring_to_svc_map) >> (ADF_SRV_TYPE_BIT_LEN * (idx))) \
	& ADF_SRV_TYPE_MASK)
#define GET_ERR_MASK(accel_dev) (&GET_HW_DATA(accel_dev)->dev_err_mask)
#define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines)
#define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_ops)
#define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops)
#define GET_DC_OPS(accel_dev) (&(accel_dev)->hw_device->dc_ops)
#define GET_VFMIG_OPS(accel_dev) (&(accel_dev)->hw_device->vfmig_ops)
#define GET_TL_DATA(accel_dev) GET_HW_DATA(accel_dev)->tl_data
#define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev
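
/*
 * Sketch (illustrative only) of the accessor macros above, e.g. walking the
 * ring banks of a device; "i" is a local counter, not defined here:
 *
 *	u8 num_banks = GET_MAX_BANKS(accel_dev);
 *
 *	for (i = 0; i < num_banks; i++)
 *		dev_dbg(&GET_DEV(accel_dev), "bank %d of %d\n", i, num_banks);
 */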

struct adf_admin_comms;
struct icp_qat_fw_loader_handle;
struct adf_fw_loader_data {
	struct icp_qat_fw_loader_handle *fw_loader;
	const struct firmware *uof_fw;
	const struct firmware *mmp_fw;
};

struct adf_accel_vf_info {
	struct adf_accel_dev *accel_dev;
	struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */
	struct mutex pfvf_mig_lock; /* protects PFVF state for migration */
	struct ratelimit_state vf2pf_ratelimit;
	u32 vf_nr;
	bool init;
	bool restarting;
	u8 vf_compat_ver;
	/*
	 * Private area used for device migration.
	 * Memory allocation and free is managed by migration driver.
	 */
	void *mig_priv;
};

struct adf_dc_data {
	u8 *ovf_buff;
	size_t ovf_buff_sz;
	dma_addr_t ovf_buff_p;
};

struct adf_pm {
	struct dentry *debugfs_pm_status;
	bool present;
	int idle_irq_counters;
	int throttle_irq_counters;
	int fw_irq_counters;
	int host_ack_counter;
	int host_nack_counter;
	ssize_t (*print_pm_status)(struct adf_accel_dev *accel_dev,
				   char __user *buf, size_t count, loff_t *pos);
};

struct adf_sysfs {
	int ring_num;
	struct rw_semaphore lock; /* protects access to the fields in this struct */
};

struct adf_accel_dev {
	struct adf_etr_data *transport;
	struct adf_hw_device_data *hw_device;
	struct adf_cfg_device_data *cfg;
	struct adf_fw_loader_data *fw_loader;
	struct adf_admin_comms *admin;
	struct adf_telemetry *telemetry;
	struct adf_dc_data *dc_data;
	struct adf_pm power_management;
	struct list_head crypto_list;
	struct list_head compression_list;
	unsigned long status;
	atomic_t ref_count;
	struct dentry *debugfs_dir;
	struct dentry *fw_cntr_dbgfile;
	struct dentry *cnv_dbgfile;
	struct list_head list;
	struct module *owner;
	struct adf_accel_pci accel_pci_dev;
	struct adf_timer *timer;
	struct adf_heartbeat *heartbeat;
	struct adf_rl *rate_limiting;
	struct adf_sysfs sysfs;
	union {
		struct {
			/* protects VF2PF interrupts access */
			spinlock_t vf2pf_ints_lock;
			/* vf_info is non-zero when SR-IOV is init'ed */
			struct adf_accel_vf_info *vf_info;
		} pf;
		struct {
			bool irq_enabled;
			char irq_name[ADF_MAX_MSIX_VECTOR_NAME];
			struct tasklet_struct pf2vf_bh_tasklet;
			struct mutex vf2pf_lock; /* protect CSR access */
			struct completion msg_received;
			struct pfvf_message response; /* temp field holding pf2vf response */
			u8 pf_compat_ver;
		} vf;
	};
	struct adf_error_counters ras_errors;
	struct mutex state_lock; /* protect state of the device */
	bool is_vf;
	bool autoreset_on_error;
	u32 accel_id;
};
#endif