xref: /linux/drivers/crypto/intel/qat/qat_6xxx/adf_6xxx_hw_data.c (revision 8d2b0853add1d7534dc0794e3c8e0b9e8c4ec640)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright(c) 2025 Intel Corporation */
3 #include <linux/array_size.h>
4 #include <linux/bitfield.h>
5 #include <linux/bitops.h>
6 #include <linux/bits.h>
7 #include <linux/iopoll.h>
8 #include <linux/pci.h>
9 #include <linux/types.h>
10 
11 #include <adf_accel_devices.h>
12 #include <adf_admin.h>
13 #include <adf_bank_state.h>
14 #include <adf_cfg.h>
15 #include <adf_cfg_services.h>
16 #include <adf_clock.h>
17 #include <adf_common_drv.h>
18 #include <adf_fw_config.h>
19 #include <adf_gen6_pm.h>
20 #include <adf_gen6_ras.h>
21 #include <adf_gen6_shared.h>
22 #include <adf_gen6_tl.h>
23 #include <adf_timer.h>
24 #include "adf_6xxx_hw_data.h"
25 #include "icp_qat_fw_comp.h"
26 #include "icp_qat_hw_51_comp.h"
27 
28 #define RP_GROUP_0_MASK		(BIT(0) | BIT(2))
29 #define RP_GROUP_1_MASK		(BIT(1) | BIT(3))
30 #define RP_GROUP_ALL_MASK	(RP_GROUP_0_MASK | RP_GROUP_1_MASK)
31 
32 #define ADF_AE_GROUP_0		GENMASK(3, 0)
33 #define ADF_AE_GROUP_1		GENMASK(7, 4)
34 #define ADF_AE_GROUP_2		BIT(8)
35 
36 struct adf_ring_config {
37 	u32 ring_mask;
38 	enum adf_cfg_service_type ring_type;
39 	const unsigned long *thrd_mask;
40 };
41 
42 static u32 rmask_two_services[] = {
43 	RP_GROUP_0_MASK,
44 	RP_GROUP_1_MASK,
45 };
46 
47 enum adf_gen6_rps {
48 	RP0 = 0,
49 	RP1 = 1,
50 	RP2 = 2,
51 	RP3 = 3,
52 	RP_MAX = RP3
53 };
54 
55 /*
56  * thrd_mask_[sym|asym|cpr|dcc|dcpr]: these static arrays define the thread
57  * configuration for handling requests of specific services across the
58  * accelerator engines. Each element in an array corresponds to an
59  * accelerator engine, with the value being a bitmask that specifies which
60  * threads within that engine are capable of processing the particular service.
61  *
62  * For example, a value of 0x0C means that threads 2 and 3 are enabled for the
63  * service in the respective accelerator engine.
64  */
65 static const unsigned long thrd_mask_sym[ADF_6XXX_MAX_ACCELENGINES] = {
66 	0x0C, 0x0C, 0x0C, 0x0C, 0x1C, 0x1C, 0x1C, 0x1C, 0x00
67 };
68 
69 static const unsigned long thrd_mask_asym[ADF_6XXX_MAX_ACCELENGINES] = {
70 	0x70, 0x70, 0x70, 0x70, 0x60, 0x60, 0x60, 0x60, 0x00
71 };
72 
73 static const unsigned long thrd_mask_cpr[ADF_6XXX_MAX_ACCELENGINES] = {
74 	0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00
75 };
76 
77 static const unsigned long thrd_mask_dcc[ADF_6XXX_MAX_ACCELENGINES] = {
78 	0x00, 0x00, 0x00, 0x00, 0x07, 0x07, 0x03, 0x03, 0x00
79 };
80 
81 static const unsigned long thrd_mask_dcpr[ADF_6XXX_MAX_ACCELENGINES] = {
82 	0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x00
83 };
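/*
 * By way of illustration, reading the tables above for engine 4:
 * thrd_mask_sym[4] == 0x1C means threads 2, 3 and 4 may handle symmetric
 * crypto requests on that engine, while thrd_mask_asym[4] == 0x60 limits
 * asymmetric crypto to threads 5 and 6.
 */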
84 
85 static const char *const adf_6xxx_fw_objs[] = {
86 	[ADF_FW_CY_OBJ] = ADF_6XXX_CY_OBJ,
87 	[ADF_FW_DC_OBJ] = ADF_6XXX_DC_OBJ,
88 	[ADF_FW_ADMIN_OBJ] = ADF_6XXX_ADMIN_OBJ,
89 };
90 
91 static const struct adf_fw_config adf_default_fw_config[] = {
92 	{ ADF_AE_GROUP_1, ADF_FW_DC_OBJ },
93 	{ ADF_AE_GROUP_0, ADF_FW_CY_OBJ },
94 	{ ADF_AE_GROUP_2, ADF_FW_ADMIN_OBJ },
95 };
96 
97 static struct adf_hw_device_class adf_6xxx_class = {
98 	.name = ADF_6XXX_DEVICE_NAME,
99 	.type = DEV_6XXX,
100 };
101 
102 static bool services_supported(unsigned long mask)
103 {
104 	int num_svc;
105 
106 	if (mask >= BIT(SVC_COUNT))
107 		return false;
108 
109 	num_svc = hweight_long(mask);
110 	switch (num_svc) {
111 	case ADF_ONE_SERVICE:
112 		return true;
113 	case ADF_TWO_SERVICES:
114 	case ADF_THREE_SERVICES:
115 		return !test_bit(SVC_DCC, &mask);
116 	default:
117 		return false;
118 	}
119 }
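/*
 * For example, a mask with only the dcc service set is accepted (one
 * service), as is a mask such as sym + asym + dc (three services), but any
 * two- or three-service combination that includes dcc is rejected, since
 * compression chaining cannot be combined with other services.
 */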
120 
121 static int get_service(unsigned long *mask)
122 {
123 	if (test_and_clear_bit(SVC_ASYM, mask))
124 		return SVC_ASYM;
125 
126 	if (test_and_clear_bit(SVC_SYM, mask))
127 		return SVC_SYM;
128 
129 	if (test_and_clear_bit(SVC_DC, mask))
130 		return SVC_DC;
131 
132 	if (test_and_clear_bit(SVC_DCC, mask))
133 		return SVC_DCC;
134 
135 	if (test_and_clear_bit(SVC_DECOMP, mask))
136 		return SVC_DECOMP;
137 
138 	return -EINVAL;
139 }
140 
141 static enum adf_cfg_service_type get_ring_type(unsigned int service)
142 {
143 	switch (service) {
144 	case SVC_SYM:
145 		return SYM;
146 	case SVC_ASYM:
147 		return ASYM;
148 	case SVC_DC:
149 	case SVC_DCC:
150 		return COMP;
151 	case SVC_DECOMP:
152 		return DECOMP;
153 	default:
154 		return UNUSED;
155 	}
156 }
157 
158 static const unsigned long *get_thrd_mask(unsigned int service)
159 {
160 	switch (service) {
161 	case SVC_SYM:
162 		return thrd_mask_sym;
163 	case SVC_ASYM:
164 		return thrd_mask_asym;
165 	case SVC_DC:
166 		return thrd_mask_cpr;
167 	case SVC_DCC:
168 		return thrd_mask_dcc;
169 	case SVC_DECOMP:
170 		return thrd_mask_dcpr;
171 	default:
172 		return NULL;
173 	}
174 }
175 
176 static int get_rp_config(struct adf_accel_dev *accel_dev, struct adf_ring_config *rp_config,
177 			 unsigned int *num_services)
178 {
179 	unsigned int i, nservices;
180 	unsigned long mask;
181 	int ret, service;
182 
183 	ret = adf_get_service_mask(accel_dev, &mask);
184 	if (ret)
185 		return ret;
186 
187 	nservices = hweight_long(mask);
188 	if (nservices > MAX_NUM_CONCURR_SVC)
189 		return -EINVAL;
190 
191 	for (i = 0; i < nservices; i++) {
192 		service = get_service(&mask);
193 		if (service < 0)
194 			return service;
195 
196 		rp_config[i].ring_type = get_ring_type(service);
197 		rp_config[i].thrd_mask = get_thrd_mask(service);
198 
199 		/*
200 		 * If there is only one service enabled, use all ring pairs for
201 		 * that service.
202 		 * If there are two services enabled, use ring pairs 0 and 2 for
203 		 * one service and ring pairs 1 and 3 for the other service.
204 		 */
205 		switch (nservices) {
206 		case ADF_ONE_SERVICE:
207 			rp_config[i].ring_mask = RP_GROUP_ALL_MASK;
208 			break;
209 		case ADF_TWO_SERVICES:
210 			rp_config[i].ring_mask = rmask_two_services[i];
211 			break;
212 		case ADF_THREE_SERVICES:
213 			rp_config[i].ring_mask = BIT(i);
214 
215 			/* If ASYM is enabled, use additional ring pair */
216 			if (service == SVC_ASYM)
217 				rp_config[i].ring_mask |= BIT(RP3);
218 
219 			break;
220 		default:
221 			return -EINVAL;
222 		}
223 	}
224 
225 	*num_services = nservices;
226 
227 	return 0;
228 }
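/*
 * Example of the resulting layout, assuming a configuration with the three
 * services asym, sym and dc enabled: get_service() returns them in that
 * order, so asym is assigned ring pair 0 plus the additional ring pair 3,
 * sym gets ring pair 1 and dc gets ring pair 2. With only sym and dc
 * enabled, sym uses ring pairs 0 and 2 and dc uses ring pairs 1 and 3.
 */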
229 
230 static u32 adf_gen6_get_arb_mask(struct adf_accel_dev *accel_dev, unsigned int ae)
231 {
232 	struct adf_ring_config rp_config[MAX_NUM_CONCURR_SVC];
233 	unsigned int num_services, i, thrd;
234 	u32 ring_mask, thd2arb_mask = 0;
235 	const unsigned long *p_mask;
236 
237 	if (get_rp_config(accel_dev, rp_config, &num_services))
238 		return 0;
239 
240 	/*
241 	 * The thd2arb_mask maps ring pairs to threads within an accelerator engine.
242 	 * It ensures that jobs submitted to ring pairs are scheduled on threads capable
243 	 * of handling the specified service type.
244 	 *
245 	 * Each group of 4 bits in the mask corresponds to a thread, with each bit
246 	 * indicating whether a job from a ring pair can be scheduled on that thread.
247 	 * The use of 4 bits is due to the organization of ring pairs into groups of
248 	 * four, where each group shares the same configuration.
249 	 */
250 	for (i = 0; i < num_services; i++) {
251 		p_mask = &rp_config[i].thrd_mask[ae];
252 		ring_mask = rp_config[i].ring_mask;
253 
254 		for_each_set_bit(thrd, p_mask, ADF_NUM_THREADS_PER_AE)
255 			thd2arb_mask |= ring_mask << (thrd * 4);
256 	}
257 
258 	return thd2arb_mask;
259 }
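/*
 * Worked example, assuming a configuration with asym and sym enabled: asym
 * is mapped to ring pairs 0 and 2 (ring mask 0x5) and sym to ring pairs 1
 * and 3 (ring mask 0xA). On engine 0, thrd_mask_asym[0] == 0x70 selects
 * threads 4, 5 and 6 and thrd_mask_sym[0] == 0x0C selects threads 2 and 3,
 * so the resulting mask is (0x5 << 16) | (0x5 << 20) | (0x5 << 24) |
 * (0xA << 8) | (0xA << 12) == 0x555aa00.
 */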
260 
261 static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev)
262 {
263 	enum adf_cfg_service_type rps[ADF_GEN6_NUM_BANKS_PER_VF] = { };
264 	struct adf_ring_config rp_config[MAX_NUM_CONCURR_SVC];
265 	unsigned int num_services, rp_num, i;
266 	unsigned long cfg_mask;
267 	u16 ring_to_svc_map;
268 
269 	if (get_rp_config(accel_dev, rp_config, &num_services))
270 		return 0;
271 
272 	/*
273 	 * Loop through the configured services and populate the `rps` array,
274 	 * which records the service each ring pair handles (i.e. symmetric or
275 	 * asymmetric crypto, compression, decompression or compression chaining).
276 	 */
277 	for (i = 0; i < num_services; i++) {
278 		cfg_mask = rp_config[i].ring_mask;
279 		for_each_set_bit(rp_num, &cfg_mask, ADF_GEN6_NUM_BANKS_PER_VF)
280 			rps[rp_num] = rp_config[i].ring_type;
281 	}
282 
283 	/*
284 	 * The ring_to_svc_map is structured into segments of 3 bits, with each
285 	 * segment representing the service configuration for a specific ring pair.
286 	 * Since ring pairs are organized into groups of 4, the ring_to_svc_map
287 	 * contains 4 such 3-bit segments, each corresponding to one ring pair.
288 	 *
289 	 * The device has 64 ring pairs, which are organized in groups of 4, namely
290 	 * 16 groups. Each group has the same configuration, represented here by
291 	 * `ring_to_svc_map`.
292 	 */
293 	ring_to_svc_map = rps[RP0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT |
294 			  rps[RP1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT |
295 			  rps[RP2] << ADF_CFG_SERV_RING_PAIR_2_SHIFT |
296 			  rps[RP3] << ADF_CFG_SERV_RING_PAIR_3_SHIFT;
297 
298 	return ring_to_svc_map;
299 }
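/*
 * Continuing the asym + sym example: asym owns ring pairs 0 and 2 and sym
 * owns ring pairs 1 and 3, so rps[] becomes { ASYM, SYM, ASYM, SYM } and
 * ring_to_svc_map packs those four 3-bit service codes into bits 0-2, 3-5,
 * 6-8 and 9-11 respectively.
 */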
300 
301 static u32 get_accel_mask(struct adf_hw_device_data *self)
302 {
303 	return ADF_GEN6_ACCELERATORS_MASK;
304 }
305 
306 static u32 get_num_accels(struct adf_hw_device_data *self)
307 {
308 	return ADF_GEN6_MAX_ACCELERATORS;
309 }
310 
311 static u32 get_num_aes(struct adf_hw_device_data *self)
312 {
313 	return self ? hweight32(self->ae_mask) : 0;
314 }
315 
316 static u32 get_misc_bar_id(struct adf_hw_device_data *self)
317 {
318 	return ADF_GEN6_PMISC_BAR;
319 }
320 
321 static u32 get_etr_bar_id(struct adf_hw_device_data *self)
322 {
323 	return ADF_GEN6_ETR_BAR;
324 }
325 
326 static u32 get_sram_bar_id(struct adf_hw_device_data *self)
327 {
328 	return ADF_GEN6_SRAM_BAR;
329 }
330 
331 static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
332 {
333 	return DEV_SKU_1;
334 }
335 
336 static void get_arb_info(struct arb_info *arb_info)
337 {
338 	arb_info->arb_cfg = ADF_GEN6_ARB_CONFIG;
339 	arb_info->arb_offset = ADF_GEN6_ARB_OFFSET;
340 	arb_info->wt2sam_offset = ADF_GEN6_ARB_WRK_2_SER_MAP_OFFSET;
341 }
342 
343 static void get_admin_info(struct admin_info *admin_csrs_info)
344 {
345 	admin_csrs_info->mailbox_offset = ADF_GEN6_MAILBOX_BASE_OFFSET;
346 	admin_csrs_info->admin_msg_ur = ADF_GEN6_ADMINMSGUR_OFFSET;
347 	admin_csrs_info->admin_msg_lr = ADF_GEN6_ADMINMSGLR_OFFSET;
348 }
349 
350 static u32 get_heartbeat_clock(struct adf_hw_device_data *self)
351 {
352 	return ADF_GEN6_COUNTER_FREQ;
353 }
354 
355 static void enable_error_correction(struct adf_accel_dev *accel_dev)
356 {
357 	void __iomem *csr = adf_get_pmisc_base(accel_dev);
358 
359 	/*
360 	 * Enable all error notification bits in errsou3 except VFLR
361 	 * notification on host.
362 	 */
363 	ADF_CSR_WR(csr, ADF_GEN6_ERRMSK3, ADF_GEN6_VFLNOTIFY);
364 }
365 
366 static void enable_ints(struct adf_accel_dev *accel_dev)
367 {
368 	void __iomem *addr = adf_get_pmisc_base(accel_dev);
369 
370 	/* Enable bundle interrupts */
371 	ADF_CSR_WR(addr, ADF_GEN6_SMIAPF_RP_X0_MASK_OFFSET, 0);
372 	ADF_CSR_WR(addr, ADF_GEN6_SMIAPF_RP_X1_MASK_OFFSET, 0);
373 
374 	/* Enable misc interrupts */
375 	ADF_CSR_WR(addr, ADF_GEN6_SMIAPF_MASK_OFFSET, 0);
376 }
377 
378 static void set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
379 {
380 	void __iomem *addr = adf_get_pmisc_base(accel_dev);
381 	u64 val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
382 	u64 val = ADF_SSM_WDT_DEFAULT_VALUE;
383 
384 	/* Enable watchdog timer for sym and dc */
385 	ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTATHL_OFFSET, ADF_SSMWDTATHH_OFFSET, val);
386 	ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTCNVL_OFFSET, ADF_SSMWDTCNVH_OFFSET, val);
387 	ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTUCSL_OFFSET, ADF_SSMWDTUCSH_OFFSET, val);
388 	ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTDCPRL_OFFSET, ADF_SSMWDTDCPRH_OFFSET, val);
389 
390 	/* Enable watchdog timer for pke */
391 	ADF_CSR_WR64_LO_HI(addr, ADF_SSMWDTPKEL_OFFSET, ADF_SSMWDTPKEH_OFFSET, val_pke);
392 }
393 
394 /*
395  * The vector routing table is used to select the MSI-X entry to use for each
396  * interrupt source.
397  * The first ADF_GEN6_ETR_MAX_BANKS entries correspond to ring interrupts.
398  * The final entry corresponds to VF2PF or error interrupts.
399  * This vector table could be used to configure one MSI-X entry to be shared
400  * between multiple interrupt sources.
401  *
402  * The default routing is set to have a one to one correspondence between the
403  * interrupt source and the MSI-X entry used.
404  */
405 static void set_msix_default_rttable(struct adf_accel_dev *accel_dev)
406 {
407 	void __iomem *csr = adf_get_pmisc_base(accel_dev);
408 	unsigned int i;
409 
410 	for (i = 0; i <= ADF_GEN6_ETR_MAX_BANKS; i++)
411 		ADF_CSR_WR(csr, ADF_GEN6_MSIX_RTTABLE_OFFSET(i), i);
412 }
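/*
 * The inclusive loop bound is deliberate: indices 0..ADF_GEN6_ETR_MAX_BANKS - 1
 * route the per-bank ring interrupts and the extra entry at index
 * ADF_GEN6_ETR_MAX_BANKS routes the VF2PF/error interrupt, giving the
 * one-to-one source to MSI-X vector mapping described above.
 */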
413 
414 static int reset_ring_pair(void __iomem *csr, u32 bank_number)
415 {
416 	u32 status;
417 	int ret;
418 
419 	/*
420 	 * Set the reset bit (BIT(0)) in the rpresetctl register.
421 	 * Since the rpresetctl register has no other RW fields, there is no
422 	 * need to preserve the values of other bits; just write it directly.
423 	 */
424 	ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETCTL(bank_number),
425 		   ADF_WQM_CSR_RPRESETCTL_RESET);
426 
427 	/* Read rpresetsts register and wait for rp reset to complete */
428 	ret = read_poll_timeout(ADF_CSR_RD, status,
429 				status & ADF_WQM_CSR_RPRESETSTS_STATUS,
430 				ADF_RPRESET_POLL_DELAY_US,
431 				ADF_RPRESET_POLL_TIMEOUT_US, true,
432 				csr, ADF_WQM_CSR_RPRESETSTS(bank_number));
433 	if (ret)
434 		return ret;
435 
436 	/* When ring pair reset is done, clear rpresetsts */
437 	ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETSTS(bank_number), ADF_WQM_CSR_RPRESETSTS_STATUS);
438 
439 	return 0;
440 }
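/*
 * The wait above relies on the generic read_poll_timeout() helper: it
 * re-reads ADF_WQM_CSR_RPRESETSTS(bank_number) every
 * ADF_RPRESET_POLL_DELAY_US microseconds (sleeping before the first read)
 * until the reset-complete status bit is set, or returns -ETIMEDOUT once
 * ADF_RPRESET_POLL_TIMEOUT_US has elapsed.
 */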
441 
442 static int ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
443 {
444 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
445 	void __iomem *csr = adf_get_etr_base(accel_dev);
446 	int ret;
447 
448 	if (bank_number >= hw_data->num_banks)
449 		return -EINVAL;
450 
451 	dev_dbg(&GET_DEV(accel_dev), "ring pair reset for bank:%d\n", bank_number);
452 
453 	ret = reset_ring_pair(csr, bank_number);
454 	if (ret)
455 		dev_err(&GET_DEV(accel_dev), "ring pair reset failed (timeout)\n");
456 	else
457 		dev_dbg(&GET_DEV(accel_dev), "ring pair reset successful\n");
458 
459 	return ret;
460 }
461 
462 static int build_comp_block(void *ctx, enum adf_dc_algo algo)
463 {
464 	struct icp_qat_fw_comp_req *req_tmpl = ctx;
465 	struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
466 	struct icp_qat_hw_comp_51_config_csr_lower hw_comp_lower_csr = { };
467 	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
468 	u32 lower_val;
469 
470 	switch (algo) {
471 	case QAT_DEFLATE:
472 		header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DYNAMIC;
473 		break;
474 	default:
475 		return -EINVAL;
476 	}
477 
478 	hw_comp_lower_csr.lllbd = ICP_QAT_HW_COMP_51_LLLBD_CTRL_LLLBD_DISABLED;
479 	hw_comp_lower_csr.sd = ICP_QAT_HW_COMP_51_SEARCH_DEPTH_LEVEL_1;
480 	lower_val = ICP_QAT_FW_COMP_51_BUILD_CONFIG_LOWER(hw_comp_lower_csr);
481 	cd_pars->u.sl.comp_slice_cfg_word[0] = lower_val;
482 	cd_pars->u.sl.comp_slice_cfg_word[1] = 0;
483 
484 	return 0;
485 }
486 
487 static int build_decomp_block(void *ctx, enum adf_dc_algo algo)
488 {
489 	struct icp_qat_fw_comp_req *req_tmpl = ctx;
490 	struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
491 	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
492 
493 	switch (algo) {
494 	case QAT_DEFLATE:
495 		header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
496 		break;
497 	default:
498 		return -EINVAL;
499 	}
500 
501 	cd_pars->u.sl.comp_slice_cfg_word[0] = 0;
502 	cd_pars->u.sl.comp_slice_cfg_word[1] = 0;
503 
504 	return 0;
505 }
506 
507 static void adf_gen6_init_dc_ops(struct adf_dc_ops *dc_ops)
508 {
509 	dc_ops->build_comp_block = build_comp_block;
510 	dc_ops->build_decomp_block = build_decomp_block;
511 }
512 
513 static int adf_gen6_init_thd2arb_map(struct adf_accel_dev *accel_dev)
514 {
515 	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
516 	u32 *thd2arb_map = hw_data->thd_to_arb_map;
517 	unsigned int i;
518 
519 	for (i = 0; i < hw_data->num_engines; i++) {
520 		thd2arb_map[i] = adf_gen6_get_arb_mask(accel_dev, i);
521 		dev_dbg(&GET_DEV(accel_dev), "ME:%d arb_mask:%#x\n", i, thd2arb_map[i]);
522 	}
523 
524 	return 0;
525 }
526 
527 static void init_num_svc_aes(struct adf_rl_hw_data *device_data)
528 {
529 	enum adf_fw_objs obj_type, obj_iter;
530 	unsigned int svc, i, num_grp;
531 	u32 ae_mask;
532 
533 	for (svc = 0; svc < SVC_BASE_COUNT; svc++) {
534 		switch (svc) {
535 		case SVC_SYM:
536 		case SVC_ASYM:
537 			obj_type = ADF_FW_CY_OBJ;
538 			break;
539 		case SVC_DC:
540 		case SVC_DECOMP:
541 			obj_type = ADF_FW_DC_OBJ;
542 			break;
543 		}
544 
545 		num_grp = ARRAY_SIZE(adf_default_fw_config);
546 		for (i = 0; i < num_grp; i++) {
547 			obj_iter = adf_default_fw_config[i].obj;
548 			if (obj_iter == obj_type) {
549 				ae_mask = adf_default_fw_config[i].ae_mask;
550 				device_data->svc_ae_mask[svc] = hweight32(ae_mask);
551 				break;
552 			}
553 		}
554 	}
555 }
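/*
 * With the default firmware configuration above, sym and asym resolve to
 * ADF_FW_CY_OBJ, which runs on the four engines in ADF_AE_GROUP_0, while dc
 * and decomp resolve to ADF_FW_DC_OBJ on the four engines in ADF_AE_GROUP_1,
 * so svc_ae_mask[] ends up holding 4 for each of these services.
 */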
556 
557 static u32 adf_gen6_get_svc_slice_cnt(struct adf_accel_dev *accel_dev,
558 				      enum adf_base_services svc)
559 {
560 	struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data;
561 
562 	switch (svc) {
563 	case SVC_SYM:
564 		return device_data->slices.cph_cnt;
565 	case SVC_ASYM:
566 		return device_data->slices.pke_cnt;
567 	case SVC_DC:
568 		return device_data->slices.cpr_cnt + device_data->slices.dcpr_cnt;
569 	case SVC_DECOMP:
570 		return device_data->slices.dcpr_cnt;
571 	default:
572 		return 0;
573 	}
574 }
575 
576 static void set_vc_csr_for_bank(void __iomem *csr, u32 bank_number)
577 {
578 	u32 value;
579 
580 	/*
581 	 * After each PF FLR, for each of the 64 ring pairs in the PF, the
582 	 * driver must program the ringmodectl CSRs.
583 	 */
584 	value = ADF_CSR_RD(csr, ADF_GEN6_CSR_RINGMODECTL(bank_number));
585 	FIELD_MODIFY(ADF_GEN6_RINGMODECTL_TC_MASK, &value, ADF_GEN6_RINGMODECTL_TC_DEFAULT);
586 	FIELD_MODIFY(ADF_GEN6_RINGMODECTL_TC_EN_MASK, &value, ADF_GEN6_RINGMODECTL_TC_EN_OP1);
587 	ADF_CSR_WR(csr, ADF_GEN6_CSR_RINGMODECTL(bank_number), value);
588 }
589 
590 static int set_vc_config(struct adf_accel_dev *accel_dev)
591 {
592 	struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
593 	u32 value;
594 	int err;
595 
596 	/*
597 	 * After each PF FLR, the driver must program the Port Virtual Channel (VC)
598 	 * Control Registers.
599 	 * Read PVC0CTL then write the masked values.
600 	 */
601 	pci_read_config_dword(pdev, ADF_GEN6_PVC0CTL_OFFSET, &value);
602 	FIELD_MODIFY(ADF_GEN6_PVC0CTL_TCVCMAP_MASK, &value, ADF_GEN6_PVC0CTL_TCVCMAP_DEFAULT);
603 	err = pci_write_config_dword(pdev, ADF_GEN6_PVC0CTL_OFFSET, value);
604 	if (err) {
605 		dev_err(&GET_DEV(accel_dev), "pci write to PVC0CTL failed\n");
606 		return pcibios_err_to_errno(err);
607 	}
608 
609 	/* Read PVC1CTL then write masked values */
610 	pci_read_config_dword(pdev, ADF_GEN6_PVC1CTL_OFFSET, &value);
611 	FIELD_MODIFY(ADF_GEN6_PVC1CTL_TCVCMAP_MASK, &value, ADF_GEN6_PVC1CTL_TCVCMAP_DEFAULT);
612 	FIELD_MODIFY(ADF_GEN6_PVC1CTL_VCEN_MASK, &value, ADF_GEN6_PVC1CTL_VCEN_ON);
613 	err = pci_write_config_dword(pdev, ADF_GEN6_PVC1CTL_OFFSET, value);
614 	if (err)
615 		dev_err(&GET_DEV(accel_dev), "pci write to PVC1CTL failed\n");
616 
617 	return pcibios_err_to_errno(err);
618 }
619 
620 static int adf_gen6_set_vc(struct adf_accel_dev *accel_dev)
621 {
622 	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
623 	void __iomem *csr = adf_get_etr_base(accel_dev);
624 	u32 i;
625 
626 	for (i = 0; i < hw_data->num_banks; i++) {
627 		dev_dbg(&GET_DEV(accel_dev), "set virtual channels for bank:%d\n", i);
628 		set_vc_csr_for_bank(csr, i);
629 	}
630 
631 	return set_vc_config(accel_dev);
632 }
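/*
 * Both helpers above follow the same read-modify-write pattern: read the
 * register, use FIELD_MODIFY() from <linux/bitfield.h> to replace only the
 * traffic-class/virtual-channel fields selected by the corresponding mask,
 * then write the value back so that all other bits are preserved.
 */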
633 
634 static u32 get_ae_mask(struct adf_hw_device_data *self)
635 {
636 	unsigned long fuses = self->fuses[ADF_FUSECTL4];
637 	u32 mask = ADF_6XXX_ACCELENGINES_MASK;
638 
639 	/*
640 	 * If bit 0 is set in the fuses, the first 4 engines are disabled.
641 	 * If bit 4 is set, the second group of 4 engines is disabled.
642 	 * If bit 8 is set, the admin engine is disabled.
643 	 */
644 	if (test_bit(0, &fuses))
645 		mask &= ~ADF_AE_GROUP_0;
646 
647 	if (test_bit(4, &fuses))
648 		mask &= ~ADF_AE_GROUP_1;
649 
650 	if (test_bit(8, &fuses))
651 		mask &= ~ADF_AE_GROUP_2;
652 
653 	return mask;
654 }
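/*
 * Example, assuming ADF_6XXX_ACCELENGINES_MASK covers all nine engines
 * (bits 0-8): if only bit 0 of the FUSECTL4 fuses is set, engines 0-3 are
 * fused off and the returned mask is 0x1f0, leaving engines 4-7 and the
 * admin engine available.
 */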
655 
656 static u32 get_accel_cap(struct adf_accel_dev *accel_dev)
657 {
658 	u32 capabilities_sym, capabilities_asym;
659 	u32 capabilities_dc;
660 	unsigned long mask;
661 	u32 caps = 0;
662 	u32 fusectl1;
663 
664 	fusectl1 = GET_HW_DATA(accel_dev)->fuses[ADF_FUSECTL1];
665 
666 	/* Read accelerator capabilities mask */
667 	capabilities_sym = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
668 			  ICP_ACCEL_CAPABILITIES_CIPHER |
669 			  ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
670 			  ICP_ACCEL_CAPABILITIES_SHA3 |
671 			  ICP_ACCEL_CAPABILITIES_SHA3_EXT |
672 			  ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
673 			  ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
674 			  ICP_ACCEL_CAPABILITIES_AES_V2;
675 
676 	/* A set bit in fusectl1 means the corresponding feature is OFF in this SKU */
677 	if (fusectl1 & ICP_ACCEL_GEN6_MASK_UCS_SLICE) {
678 		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
679 		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
680 		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
681 		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
682 		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
683 	}
684 	if (fusectl1 & ICP_ACCEL_GEN6_MASK_AUTH_SLICE) {
685 		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
686 		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3;
687 		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
688 		capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
689 	}
690 
691 	capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
692 			    ICP_ACCEL_CAPABILITIES_SM2 |
693 			    ICP_ACCEL_CAPABILITIES_ECEDMONT;
694 
695 	if (fusectl1 & ICP_ACCEL_GEN6_MASK_PKE_SLICE) {
696 		capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
697 		capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2;
698 		capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
699 	}
700 
701 	capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION |
702 			  ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
703 			  ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
704 			  ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
705 
706 	if (fusectl1 & ICP_ACCEL_GEN6_MASK_CPR_SLICE) {
707 		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
708 		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION;
709 		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION;
710 		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
711 	}
712 
713 	if (adf_get_service_mask(accel_dev, &mask))
714 		return 0;
715 
716 	if (test_bit(SVC_ASYM, &mask))
717 		caps |= capabilities_asym;
718 	if (test_bit(SVC_SYM, &mask))
719 		caps |= capabilities_sym;
720 	if (test_bit(SVC_DC, &mask) || test_bit(SVC_DECOMP, &mask))
721 		caps |= capabilities_dc;
722 	if (test_bit(SVC_DCC, &mask)) {
723 		/*
724 		 * Sym capabilities are available for chaining operations,
725 		 * but sym crypto instances cannot be supported
726 		 */
727 		caps = capabilities_dc | capabilities_sym;
728 		caps &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
729 	}
730 
731 	return caps;
732 }
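/*
 * The returned value is therefore the intersection of what the SKU offers
 * (capabilities are dropped when the corresponding slice is fused off in
 * FUSECTL1) and what the configured service mask requests; the dcc case
 * additionally keeps the sym algorithm capabilities for chained operations
 * while clearing the generic symmetric-crypto capability bit.
 */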
733 
734 static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev)
735 {
736 	return ARRAY_SIZE(adf_default_fw_config);
737 }
738 
739 static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num)
740 {
741 	int num_fw_objs = ARRAY_SIZE(adf_6xxx_fw_objs);
742 	int id;
743 
744 	id = adf_default_fw_config[obj_num].obj;
745 	if (id >= num_fw_objs)
746 		return NULL;
747 
748 	return adf_6xxx_fw_objs[id];
749 }
750 
751 static const char *uof_get_name_6xxx(struct adf_accel_dev *accel_dev, u32 obj_num)
752 {
753 	return uof_get_name(accel_dev, obj_num);
754 }
755 
756 static int uof_get_obj_type(struct adf_accel_dev *accel_dev, u32 obj_num)
757 {
758 	if (obj_num >= uof_get_num_objs(accel_dev))
759 		return -EINVAL;
760 
761 	return adf_default_fw_config[obj_num].obj;
762 }
763 
764 static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num)
765 {
766 	return adf_default_fw_config[obj_num].ae_mask;
767 }
768 
769 static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev)
770 {
771 	if (adf_gen6_init_thd2arb_map(accel_dev))
772 		dev_warn(&GET_DEV(accel_dev),
773 			 "Failed to generate thread to arbiter mapping\n");
774 
775 	return GET_HW_DATA(accel_dev)->thd_to_arb_map;
776 }
777 
778 static int adf_init_device(struct adf_accel_dev *accel_dev)
779 {
780 	void __iomem *addr = adf_get_pmisc_base(accel_dev);
781 	u32 status;
782 	u32 csr;
783 	int ret;
784 
785 	/* Temporarily mask PM interrupt */
786 	csr = ADF_CSR_RD(addr, ADF_GEN6_ERRMSK2);
787 	csr |= ADF_GEN6_PM_SOU;
788 	ADF_CSR_WR(addr, ADF_GEN6_ERRMSK2, csr);
789 
790 	/* Set DRV_ACTIVE bit to power up the device */
791 	ADF_CSR_WR(addr, ADF_GEN6_PM_INTERRUPT, ADF_GEN6_PM_DRV_ACTIVE);
792 
793 	/* Poll status register to make sure the device is powered up */
794 	ret = read_poll_timeout(ADF_CSR_RD, status,
795 				status & ADF_GEN6_PM_INIT_STATE,
796 				ADF_GEN6_PM_POLL_DELAY_US,
797 				ADF_GEN6_PM_POLL_TIMEOUT_US, true, addr,
798 				ADF_GEN6_PM_STATUS);
799 	if (ret) {
800 		dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n");
801 		return ret;
802 	}
803 
804 	dev_dbg(&GET_DEV(accel_dev), "Setting virtual channels for device qat_dev%d\n",
805 		accel_dev->accel_id);
806 
807 	ret = adf_gen6_set_vc(accel_dev);
808 	if (ret)
809 		dev_err(&GET_DEV(accel_dev), "Failed to set virtual channels\n");
810 
811 	return ret;
812 }
813 
814 static int enable_pm(struct adf_accel_dev *accel_dev)
815 {
816 	int ret;
817 
818 	ret = adf_init_admin_pm(accel_dev, ADF_GEN6_PM_DEFAULT_IDLE_FILTER);
819 	if (ret)
820 		return ret;
821 
822 	/* Initialize PM internal data */
823 	adf_gen6_init_dev_pm_data(accel_dev);
824 
825 	return 0;
826 }
827 
828 static int dev_config(struct adf_accel_dev *accel_dev)
829 {
830 	int ret;
831 
832 	ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
833 	if (ret)
834 		return ret;
835 
836 	ret = adf_cfg_section_add(accel_dev, "Accelerator0");
837 	if (ret)
838 		return ret;
839 
840 	switch (adf_get_service_enabled(accel_dev)) {
841 	case SVC_DC:
842 	case SVC_DCC:
843 		ret = adf_gen6_comp_dev_config(accel_dev);
844 		break;
845 	default:
846 		ret = adf_gen6_no_dev_config(accel_dev);
847 		break;
848 	}
849 	if (ret)
850 		return ret;
851 
852 	__set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
853 
854 	return ret;
855 }
856 
857 static void adf_gen6_init_rl_data(struct adf_rl_hw_data *rl_data)
858 {
859 	rl_data->pciout_tb_offset = ADF_GEN6_RL_TOKEN_PCIEOUT_BUCKET_OFFSET;
860 	rl_data->pciin_tb_offset = ADF_GEN6_RL_TOKEN_PCIEIN_BUCKET_OFFSET;
861 	rl_data->r2l_offset = ADF_GEN6_RL_R2L_OFFSET;
862 	rl_data->l2c_offset = ADF_GEN6_RL_L2C_OFFSET;
863 	rl_data->c2s_offset = ADF_GEN6_RL_C2S_OFFSET;
864 	rl_data->pcie_scale_div = ADF_6XXX_RL_PCIE_SCALE_FACTOR_DIV;
865 	rl_data->pcie_scale_mul = ADF_6XXX_RL_PCIE_SCALE_FACTOR_MUL;
866 	rl_data->max_tp[SVC_ASYM] = ADF_6XXX_RL_MAX_TP_ASYM;
867 	rl_data->max_tp[SVC_SYM] = ADF_6XXX_RL_MAX_TP_SYM;
868 	rl_data->max_tp[SVC_DC] = ADF_6XXX_RL_MAX_TP_DC;
869 	rl_data->max_tp[SVC_DECOMP] = ADF_6XXX_RL_MAX_TP_DECOMP;
870 	rl_data->scan_interval = ADF_6XXX_RL_SCANS_PER_SEC;
871 	rl_data->scale_ref = ADF_6XXX_RL_SLICE_REF;
872 
873 	init_num_svc_aes(rl_data);
874 }
875 
876 void adf_init_hw_data_6xxx(struct adf_hw_device_data *hw_data)
877 {
878 	hw_data->dev_class = &adf_6xxx_class;
879 	hw_data->instance_id = adf_6xxx_class.instances++;
880 	hw_data->num_banks = ADF_GEN6_ETR_MAX_BANKS;
881 	hw_data->num_banks_per_vf = ADF_GEN6_NUM_BANKS_PER_VF;
882 	hw_data->num_rings_per_bank = ADF_GEN6_NUM_RINGS_PER_BANK;
883 	hw_data->num_accel = ADF_GEN6_MAX_ACCELERATORS;
884 	hw_data->num_engines = ADF_6XXX_MAX_ACCELENGINES;
885 	hw_data->num_logical_accel = 1;
886 	hw_data->tx_rx_gap = ADF_GEN6_RX_RINGS_OFFSET;
887 	hw_data->tx_rings_mask = ADF_GEN6_TX_RINGS_MASK;
888 	hw_data->ring_to_svc_map = 0;
889 	hw_data->alloc_irq = adf_isr_resource_alloc;
890 	hw_data->free_irq = adf_isr_resource_free;
891 	hw_data->enable_error_correction = enable_error_correction;
892 	hw_data->get_accel_mask = get_accel_mask;
893 	hw_data->get_ae_mask = get_ae_mask;
894 	hw_data->get_num_accels = get_num_accels;
895 	hw_data->get_num_aes = get_num_aes;
896 	hw_data->get_sram_bar_id = get_sram_bar_id;
897 	hw_data->get_etr_bar_id = get_etr_bar_id;
898 	hw_data->get_misc_bar_id = get_misc_bar_id;
899 	hw_data->get_arb_info = get_arb_info;
900 	hw_data->get_admin_info = get_admin_info;
901 	hw_data->get_accel_cap = get_accel_cap;
902 	hw_data->get_sku = get_sku;
903 	hw_data->init_admin_comms = adf_init_admin_comms;
904 	hw_data->exit_admin_comms = adf_exit_admin_comms;
905 	hw_data->send_admin_init = adf_send_admin_init;
906 	hw_data->init_arb = adf_init_arb;
907 	hw_data->exit_arb = adf_exit_arb;
908 	hw_data->get_arb_mapping = adf_get_arbiter_mapping;
909 	hw_data->enable_ints = enable_ints;
910 	hw_data->reset_device = adf_reset_flr;
911 	hw_data->admin_ae_mask = ADF_6XXX_ADMIN_AE_MASK;
912 	hw_data->fw_name = ADF_6XXX_FW;
913 	hw_data->fw_mmp_name = ADF_6XXX_MMP;
914 	hw_data->uof_get_name = uof_get_name_6xxx;
915 	hw_data->uof_get_num_objs = uof_get_num_objs;
916 	hw_data->uof_get_obj_type = uof_get_obj_type;
917 	hw_data->uof_get_ae_mask = uof_get_ae_mask;
918 	hw_data->set_msix_rttable = set_msix_default_rttable;
919 	hw_data->set_ssm_wdtimer = set_ssm_wdtimer;
920 	hw_data->get_ring_to_svc_map = get_ring_to_svc_map;
921 	hw_data->disable_iov = adf_disable_sriov;
922 	hw_data->ring_pair_reset = ring_pair_reset;
923 	hw_data->dev_config = dev_config;
924 	hw_data->bank_state_save = adf_bank_state_save;
925 	hw_data->bank_state_restore = adf_bank_state_restore;
926 	hw_data->get_hb_clock = get_heartbeat_clock;
927 	hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE;
928 	hw_data->start_timer = adf_timer_start;
929 	hw_data->stop_timer = adf_timer_stop;
930 	hw_data->init_device = adf_init_device;
931 	hw_data->enable_pm = enable_pm;
932 	hw_data->services_supported = services_supported;
933 	hw_data->num_rps = ADF_GEN6_ETR_MAX_BANKS;
934 	hw_data->clock_frequency = ADF_6XXX_AE_FREQ;
935 	hw_data->get_svc_slice_cnt = adf_gen6_get_svc_slice_cnt;
936 
937 	adf_gen6_init_hw_csr_ops(&hw_data->csr_ops);
938 	adf_gen6_init_pf_pfvf_ops(&hw_data->pfvf_ops);
939 	adf_gen6_init_dc_ops(&hw_data->dc_ops);
940 	adf_gen6_init_vf_mig_ops(&hw_data->vfmig_ops);
941 	adf_gen6_init_ras_ops(&hw_data->ras_ops);
942 	adf_gen6_init_tl_data(&hw_data->tl_data);
943 	adf_gen6_init_rl_data(&hw_data->rl_data);
944 }
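/*
 * Usage sketch (illustrative; the exact allocation in the 6xxx probe path
 * may differ): the PCI probe routine allocates the per-device
 * adf_hw_device_data, populates it through this function and attaches it to
 * the accel device before the common QAT core starts using the callbacks:
 *
 *	hw_data = devm_kzalloc(&pdev->dev, sizeof(*hw_data), GFP_KERNEL);
 *	if (!hw_data)
 *		return -ENOMEM;
 *	accel_dev->hw_device = hw_data;
 *	adf_init_hw_data_6xxx(accel_dev->hw_device);
 *
 * adf_clean_hw_data_6xxx() below is the matching teardown used on device
 * removal to drop the class instance count.
 */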
945 
946 void adf_clean_hw_data_6xxx(struct adf_hw_device_data *hw_data)
947 {
948 	if (hw_data->dev_class->instances)
949 		hw_data->dev_class->instances--;
950 }
951