/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */
/* Copyright(c) 2014 - 2020 Intel Corporation */
#ifndef ADF_ACCEL_DEVICES_H_
#define ADF_ACCEL_DEVICES_H_
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/ratelimit.h>
#include <linux/types.h>
#include "adf_cfg_common.h"
#include "adf_rl.h"
#include "adf_telemetry.h"
#include "adf_pfvf_msg.h"
#include "icp_qat_hw.h"

#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
#define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf"
#define ADF_C62X_DEVICE_NAME "c6xx"
#define ADF_C62XVF_DEVICE_NAME "c6xxvf"
#define ADF_C3XXX_DEVICE_NAME "c3xxx"
#define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf"
#define ADF_4XXX_DEVICE_NAME "4xxx"
#define ADF_420XX_DEVICE_NAME "420xx"
#define ADF_4XXX_PCI_DEVICE_ID 0x4940
#define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941
#define ADF_401XX_PCI_DEVICE_ID 0x4942
#define ADF_401XXIOV_PCI_DEVICE_ID 0x4943
#define ADF_402XX_PCI_DEVICE_ID 0x4944
#define ADF_402XXIOV_PCI_DEVICE_ID 0x4945
#define ADF_420XX_PCI_DEVICE_ID 0x4946
#define ADF_420XXIOV_PCI_DEVICE_ID 0x4947
#define ADF_DEVICE_FUSECTL_OFFSET 0x40
#define ADF_DEVICE_LEGFUSE_OFFSET 0x4C
#define ADF_DEVICE_FUSECTL_MASK 0x80000000
#define ADF_PCI_MAX_BARS 3
#define ADF_DEVICE_NAME_LENGTH 32
#define ADF_ETR_MAX_RINGS_PER_BANK 16
#define ADF_MAX_MSIX_VECTOR_NAME 48
#define ADF_DEVICE_NAME_PREFIX "qat_"

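/*
 * Each capability is a distinct bit flag; a device's capability set is the
 * bitwise OR of the values below.
 */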
enum adf_accel_capabilities {
	ADF_ACCEL_CAPABILITIES_NULL = 0,
	ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = 1,
	ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = 2,
	ADF_ACCEL_CAPABILITIES_CIPHER = 4,
	ADF_ACCEL_CAPABILITIES_AUTHENTICATION = 8,
	ADF_ACCEL_CAPABILITIES_COMPRESSION = 32,
	ADF_ACCEL_CAPABILITIES_LZS_COMPRESSION = 64,
	ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128
};

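/* A mapped PCI BAR: bus address, ioremapped virtual address and length. */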
struct adf_bar {
	resource_size_t base_addr;
	void __iomem *virt_addr;
	resource_size_t size;
};

struct adf_irq {
	bool enabled;
	char name[ADF_MAX_MSIX_VECTOR_NAME];
};

struct adf_accel_msix {
	struct adf_irq *irqs;
	u32 num_entries;
};

struct adf_accel_pci {
	struct pci_dev *pci_dev;
	struct adf_accel_msix msix_entries;
	struct adf_bar pci_bars[ADF_PCI_MAX_BARS];
	u8 revid;
	u8 sku;
};

enum dev_state {
	DEV_DOWN = 0,
	DEV_UP
};

enum dev_sku_info {
	DEV_SKU_1 = 0,
	DEV_SKU_2,
	DEV_SKU_3,
	DEV_SKU_4,
	DEV_SKU_VF,
	DEV_SKU_UNKNOWN,
};

enum ras_errors {
	ADF_RAS_CORR,
	ADF_RAS_UNCORR,
	ADF_RAS_FATAL,
	ADF_RAS_ERRORS,
};

struct adf_error_counters {
	atomic_t counter[ADF_RAS_ERRORS];
	bool sysfs_added;
	bool enabled;
};

static inline const char *get_sku_info(enum dev_sku_info info)
{
	switch (info) {
	case DEV_SKU_1:
		return "SKU1";
	case DEV_SKU_2:
		return "SKU2";
	case DEV_SKU_3:
		return "SKU3";
	case DEV_SKU_4:
		return "SKU4";
	case DEV_SKU_VF:
		return "SKUVF";
	case DEV_SKU_UNKNOWN:
	default:
		break;
	}
	return "Unknown SKU";
}

struct adf_hw_device_class {
	const char *name;
	const enum adf_device_type type;
	u32 instances;
};

struct arb_info {
	u32 arb_cfg;
	u32 arb_offset;
	u32 wt2sam_offset;
};

struct admin_info {
	u32 admin_msg_ur;
	u32 admin_msg_lr;
	u32 mailbox_offset;
};

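/*
 * Callbacks for accessing the ring bank (ETR) CSRs. The register layout
 * differs between device generations, so common code goes through this
 * table instead of using hard-coded offsets.
 */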
struct adf_hw_csr_ops {
	u64 (*build_csr_ring_base_addr)(dma_addr_t addr, u32 size);
	u32 (*read_csr_ring_head)(void __iomem *csr_base_addr, u32 bank,
				  u32 ring);
	void (*write_csr_ring_head)(void __iomem *csr_base_addr, u32 bank,
				    u32 ring, u32 value);
	u32 (*read_csr_ring_tail)(void __iomem *csr_base_addr, u32 bank,
				  u32 ring);
	void (*write_csr_ring_tail)(void __iomem *csr_base_addr, u32 bank,
				    u32 ring, u32 value);
	u32 (*read_csr_e_stat)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_ring_config)(void __iomem *csr_base_addr, u32 bank,
				      u32 ring, u32 value);
	void (*write_csr_ring_base)(void __iomem *csr_base_addr, u32 bank,
				    u32 ring, dma_addr_t addr);
	void (*write_csr_int_flag)(void __iomem *csr_base_addr, u32 bank,
				   u32 value);
	void (*write_csr_int_srcsel)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_int_col_en)(void __iomem *csr_base_addr, u32 bank,
				     u32 value);
	void (*write_csr_int_col_ctl)(void __iomem *csr_base_addr, u32 bank,
				      u32 value);
	void (*write_csr_int_flag_and_col)(void __iomem *csr_base_addr,
					   u32 bank, u32 value);
	void (*write_csr_ring_srv_arb_en)(void __iomem *csr_base_addr, u32 bank,
					  u32 value);
};
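
/*
 * Illustrative only: callers are expected to reach these callbacks through
 * the per-device ops table rather than a particular implementation, e.g.:
 *
 *	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
 *
 *	csr_ops->write_csr_ring_tail(csr_base_addr, bank, ring, tail);
 */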

struct adf_cfg_device_data;
struct adf_accel_dev;
struct adf_etr_data;
struct adf_etr_ring_data;

struct adf_ras_ops {
	void (*enable_ras_errors)(struct adf_accel_dev *accel_dev);
	void (*disable_ras_errors)(struct adf_accel_dev *accel_dev);
	bool (*handle_interrupt)(struct adf_accel_dev *accel_dev,
				 bool *reset_required);
};

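/*
 * PF <-> VF messaging callbacks; the CSR offsets and interrupt handling
 * involved differ between device generations.
 */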
struct adf_pfvf_ops {
	int (*enable_comms)(struct adf_accel_dev *accel_dev);
	u32 (*get_pf2vf_offset)(u32 i);
	u32 (*get_vf2pf_offset)(u32 i);
	void (*enable_vf2pf_interrupts)(void __iomem *pmisc_addr, u32 vf_mask);
	void (*disable_all_vf2pf_interrupts)(void __iomem *pmisc_addr);
	u32 (*disable_pending_vf2pf_interrupts)(void __iomem *pmisc_addr);
	int (*send_msg)(struct adf_accel_dev *accel_dev, struct pfvf_message msg,
			u32 pfvf_offset, struct mutex *csr_lock);
	struct pfvf_message (*recv_msg)(struct adf_accel_dev *accel_dev,
					u32 pfvf_offset, u8 compat_ver);
};

struct adf_dc_ops {
	void (*build_deflate_ctx)(void *ctx);
};

struct adf_dev_err_mask {
	u32 cppagentcmdpar_mask;
	u32 parerr_ath_cph_mask;
	u32 parerr_cpr_xlt_mask;
	u32 parerr_dcpr_ucs_mask;
	u32 parerr_pke_mask;
	u32 parerr_wat_wcp_mask;
	u32 ssmfeatren_mask;
};

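/*
 * Per-generation hardware description and callback table. Each supported
 * device populates an instance of this structure at probe time (typically
 * via an adf_init_hw_data_<device>() style helper) and common code uses it
 * to drive the hardware.
 */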
struct adf_hw_device_data {
	struct adf_hw_device_class *dev_class;
	u32 (*get_accel_mask)(struct adf_hw_device_data *self);
	u32 (*get_ae_mask)(struct adf_hw_device_data *self);
	u32 (*get_accel_cap)(struct adf_accel_dev *accel_dev);
	u32 (*get_sram_bar_id)(struct adf_hw_device_data *self);
	u32 (*get_misc_bar_id)(struct adf_hw_device_data *self);
	u32 (*get_etr_bar_id)(struct adf_hw_device_data *self);
	u32 (*get_num_aes)(struct adf_hw_device_data *self);
	u32 (*get_num_accels)(struct adf_hw_device_data *self);
	void (*get_arb_info)(struct arb_info *arb_csrs_info);
	void (*get_admin_info)(struct admin_info *admin_csrs_info);
	enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
	u16 (*get_ring_to_svc_map)(struct adf_accel_dev *accel_dev);
	int (*alloc_irq)(struct adf_accel_dev *accel_dev);
	void (*free_irq)(struct adf_accel_dev *accel_dev);
	void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
	int (*init_admin_comms)(struct adf_accel_dev *accel_dev);
	void (*exit_admin_comms)(struct adf_accel_dev *accel_dev);
	int (*send_admin_init)(struct adf_accel_dev *accel_dev);
	int (*start_timer)(struct adf_accel_dev *accel_dev);
	void (*stop_timer)(struct adf_accel_dev *accel_dev);
	void (*check_hb_ctrs)(struct adf_accel_dev *accel_dev);
	u32 (*get_hb_clock)(struct adf_hw_device_data *self);
	int (*measure_clock)(struct adf_accel_dev *accel_dev);
	int (*init_arb)(struct adf_accel_dev *accel_dev);
	void (*exit_arb)(struct adf_accel_dev *accel_dev);
	const u32 *(*get_arb_mapping)(struct adf_accel_dev *accel_dev);
	int (*init_device)(struct adf_accel_dev *accel_dev);
	int (*enable_pm)(struct adf_accel_dev *accel_dev);
	bool (*handle_pm_interrupt)(struct adf_accel_dev *accel_dev);
	void (*disable_iov)(struct adf_accel_dev *accel_dev);
	void (*configure_iov_threads)(struct adf_accel_dev *accel_dev,
				      bool enable);
	void (*enable_ints)(struct adf_accel_dev *accel_dev);
	void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
	int (*ring_pair_reset)(struct adf_accel_dev *accel_dev, u32 bank_nr);
	void (*reset_device)(struct adf_accel_dev *accel_dev);
	void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
	const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num);
	u32 (*uof_get_num_objs)(struct adf_accel_dev *accel_dev);
	int (*uof_get_obj_type)(struct adf_accel_dev *accel_dev, u32 obj_num);
	u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
	int (*get_rp_group)(struct adf_accel_dev *accel_dev, u32 ae_mask);
	u32 (*get_ena_thd_mask)(struct adf_accel_dev *accel_dev, u32 obj_num);
	int (*dev_config)(struct adf_accel_dev *accel_dev);
	struct adf_pfvf_ops pfvf_ops;
	struct adf_hw_csr_ops csr_ops;
	struct adf_dc_ops dc_ops;
	struct adf_ras_ops ras_ops;
	struct adf_dev_err_mask dev_err_mask;
	struct adf_rl_hw_data rl_data;
	struct adf_tl_hw_data tl_data;
	const char *fw_name;
	const char *fw_mmp_name;
	u32 fuses;
	u32 straps;
	u32 accel_capabilities_mask;
	u32 extended_dc_capabilities;
	u16 fw_capabilities;
	u32 clock_frequency;
	u32 instance_id;
	u16 accel_mask;
	u32 ae_mask;
	u32 admin_ae_mask;
	u16 tx_rings_mask;
	u16 ring_to_svc_map;
	u32 thd_to_arb_map[ICP_QAT_HW_AE_DELIMITER];
	u8 tx_rx_gap;
	u8 num_banks;
	u16 num_banks_per_vf;
	u8 num_rings_per_bank;
	u8 num_accel;
	u8 num_logical_accel;
	u8 num_engines;
	u32 num_hb_ctrs;
	u8 num_rps;
};

/* CSR write macro */
#define ADF_CSR_WR(csr_base, csr_offset, val) \
	__raw_writel(val, csr_base + csr_offset)

/* CSR read macro */
#define ADF_CSR_RD(csr_base, csr_offset) __raw_readl(csr_base + csr_offset)
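
/*
 * Illustrative use only; SOME_CSR_OFFSET and SOME_BIT below are placeholders
 * rather than registers defined by this driver:
 *
 *	u32 val;
 *
 *	val = ADF_CSR_RD(csr_base, SOME_CSR_OFFSET);
 *	ADF_CSR_WR(csr_base, SOME_CSR_OFFSET, val | SOME_BIT);
 */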

#define ADF_CFG_NUM_SERVICES	4
#define ADF_SRV_TYPE_BIT_LEN	3
#define ADF_SRV_TYPE_MASK	0x7
#define ADF_AE_ADMIN_THREAD	7
#define ADF_NUM_THREADS_PER_AE	8
#define ADF_NUM_PKE_STRAND	2
#define ADF_AE_STRAND0_THREAD	8
#define ADF_AE_STRAND1_THREAD	9

#define GET_DEV(accel_dev) ((accel_dev)->accel_pci_dev.pci_dev->dev)
#define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars)
#define GET_HW_DATA(accel_dev) ((accel_dev)->hw_device)
#define GET_MAX_BANKS(accel_dev) (GET_HW_DATA(accel_dev)->num_banks)
#define GET_NUM_RINGS_PER_BANK(accel_dev) \
	(GET_HW_DATA(accel_dev)->num_rings_per_bank)
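/*
 * ring_to_svc_map packs one ADF_SRV_TYPE_BIT_LEN-bit (3-bit) service type
 * per service slot, for up to ADF_CFG_NUM_SERVICES (4) slots; GET_SRV_TYPE()
 * below extracts the slot selected by idx (idx == 2, for instance, yields
 * bits 8:6 of the map).
 */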
#define GET_SRV_TYPE(accel_dev, idx) \
	(((GET_HW_DATA(accel_dev)->ring_to_svc_map) >> (ADF_SRV_TYPE_BIT_LEN * (idx))) \
	& ADF_SRV_TYPE_MASK)
#define GET_ERR_MASK(accel_dev) (&GET_HW_DATA(accel_dev)->dev_err_mask)
#define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines)
#define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_ops)
#define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops)
#define GET_DC_OPS(accel_dev) (&(accel_dev)->hw_device->dc_ops)
#define GET_TL_DATA(accel_dev) (GET_HW_DATA(accel_dev)->tl_data)
#define accel_to_pci_dev(accel_ptr) ((accel_ptr)->accel_pci_dev.pci_dev)
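
/*
 * Illustrative only: common code is expected to use the accessors above
 * instead of dereferencing the structures directly, for example:
 *
 *	u8 num_banks = GET_MAX_BANKS(accel_dev);
 *
 *	dev_dbg(&GET_DEV(accel_dev), "device has %d banks\n", num_banks);
 */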

struct adf_admin_comms;
struct icp_qat_fw_loader_handle;
struct adf_fw_loader_data {
	struct icp_qat_fw_loader_handle *fw_loader;
	const struct firmware *uof_fw;
	const struct firmware *mmp_fw;
};

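/* Per-VF state kept by the PF; one entry per VF is held in pf.vf_info. */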
struct adf_accel_vf_info {
	struct adf_accel_dev *accel_dev;
	struct mutex pf2vf_lock; /* protect CSR access for PF2VF messages */
	struct ratelimit_state vf2pf_ratelimit;
	u32 vf_nr;
	bool init;
	bool restarting;
	u8 vf_compat_ver;
};

struct adf_dc_data {
	u8 *ovf_buff;
	size_t ovf_buff_sz;
	dma_addr_t ovf_buff_p;
};

struct adf_pm {
	struct dentry *debugfs_pm_status;
	bool present;
	int idle_irq_counters;
	int throttle_irq_counters;
	int fw_irq_counters;
	int host_ack_counter;
	int host_nack_counter;
	ssize_t (*print_pm_status)(struct adf_accel_dev *accel_dev,
				   char __user *buf, size_t count, loff_t *pos);
};

struct adf_sysfs {
	int ring_num;
	struct rw_semaphore lock; /* protects access to the fields in this struct */
};

struct adf_accel_dev {
	struct adf_etr_data *transport;
	struct adf_hw_device_data *hw_device;
	struct adf_cfg_device_data *cfg;
	struct adf_fw_loader_data *fw_loader;
	struct adf_admin_comms *admin;
	struct adf_telemetry *telemetry;
	struct adf_dc_data *dc_data;
	struct adf_pm power_management;
	struct list_head crypto_list;
	struct list_head compression_list;
	unsigned long status;
	atomic_t ref_count;
	struct dentry *debugfs_dir;
	struct dentry *fw_cntr_dbgfile;
	struct dentry *cnv_dbgfile;
	struct list_head list;
	struct module *owner;
	struct adf_accel_pci accel_pci_dev;
	struct adf_timer *timer;
	struct adf_heartbeat *heartbeat;
	struct adf_rl *rate_limiting;
	struct adf_sysfs sysfs;
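	/* PF-only and VF-only state; is_vf below indicates which is valid */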
	union {
		struct {
			/* protects VF2PF interrupts access */
			spinlock_t vf2pf_ints_lock;
			/* vf_info is non-NULL when SR-IOV is enabled */
			struct adf_accel_vf_info *vf_info;
		} pf;
		struct {
			bool irq_enabled;
			char irq_name[ADF_MAX_MSIX_VECTOR_NAME];
			struct tasklet_struct pf2vf_bh_tasklet;
			struct mutex vf2pf_lock; /* protect CSR access */
			struct completion msg_received;
			struct pfvf_message response; /* temp field holding pf2vf response */
			u8 pf_compat_ver;
		} vf;
	};
	struct adf_error_counters ras_errors;
	struct mutex state_lock; /* protect state of the device */
	bool is_vf;
	bool autoreset_on_error;
	u32 accel_id;
};
#endif