// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 Intel Corporation */
#include <linux/iopoll.h>
#include <asm/div64.h>
#include "adf_accel_devices.h"
#include "adf_cfg_services.h"
#include "adf_common_drv.h"
#include "adf_fw_config.h"
#include "adf_gen4_hw_data.h"
#include "adf_gen4_pm.h"

u32 adf_gen4_get_accel_mask(struct adf_hw_device_data *self)
{
	return ADF_GEN4_ACCELERATORS_MASK;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_accel_mask);

u32 adf_gen4_get_num_accels(struct adf_hw_device_data *self)
{
	return ADF_GEN4_MAX_ACCELERATORS;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_num_accels);

u32 adf_gen4_get_num_aes(struct adf_hw_device_data *self)
{
	if (!self || !self->ae_mask)
		return 0;

	return hweight32(self->ae_mask);
}
EXPORT_SYMBOL_GPL(adf_gen4_get_num_aes);

u32 adf_gen4_get_misc_bar_id(struct adf_hw_device_data *self)
{
	return ADF_GEN4_PMISC_BAR;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_misc_bar_id);

u32 adf_gen4_get_etr_bar_id(struct adf_hw_device_data *self)
{
	return ADF_GEN4_ETR_BAR;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_etr_bar_id);

u32 adf_gen4_get_sram_bar_id(struct adf_hw_device_data *self)
{
	return ADF_GEN4_SRAM_BAR;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_sram_bar_id);

enum dev_sku_info adf_gen4_get_sku(struct adf_hw_device_data *self)
{
	return DEV_SKU_1;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_sku);

void adf_gen4_get_arb_info(struct arb_info *arb_info)
{
	arb_info->arb_cfg = ADF_GEN4_ARB_CONFIG;
	arb_info->arb_offset = ADF_GEN4_ARB_OFFSET;
	arb_info->wt2sam_offset = ADF_GEN4_ARB_WRK_2_SER_MAP_OFFSET;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_arb_info);

void adf_gen4_get_admin_info(struct admin_info *admin_csrs_info)
{
	admin_csrs_info->mailbox_offset = ADF_GEN4_MAILBOX_BASE_OFFSET;
	admin_csrs_info->admin_msg_ur = ADF_GEN4_ADMINMSGUR_OFFSET;
	admin_csrs_info->admin_msg_lr = ADF_GEN4_ADMINMSGLR_OFFSET;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_admin_info);

u32 adf_gen4_get_heartbeat_clock(struct adf_hw_device_data *self)
{
	/* GEN4 uses the KPT counter for the heartbeat */
	return ADF_GEN4_KPT_COUNTER_FREQ;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_heartbeat_clock);

void adf_gen4_enable_error_correction(struct adf_accel_dev *accel_dev)
{
	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR];
	void __iomem *csr = misc_bar->virt_addr;

	/* Enable all in errsou3 except VFLR notification on host */
	ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_VFLNOTIFY);
}
EXPORT_SYMBOL_GPL(adf_gen4_enable_error_correction);

void adf_gen4_enable_ints(struct adf_accel_dev *accel_dev)
{
	void __iomem *addr;

	addr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr;

	/* Enable bundle interrupts */
	ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_RP_X0_MASK_OFFSET, 0);
	ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_RP_X1_MASK_OFFSET, 0);

	/* Enable misc interrupts */
	ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_MASK_OFFSET, 0);
}
EXPORT_SYMBOL_GPL(adf_gen4_enable_ints);

int adf_gen4_init_device(struct adf_accel_dev *accel_dev)
{
	void __iomem *addr;
	u32 status;
	u32 csr;
	int ret;

	addr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr;

	/* Temporarily mask PM interrupt */
	csr = ADF_CSR_RD(addr, ADF_GEN4_ERRMSK2);
	csr |= ADF_GEN4_PM_SOU;
	ADF_CSR_WR(addr, ADF_GEN4_ERRMSK2, csr);

	/* Set DRV_ACTIVE bit to power up the device */
	ADF_CSR_WR(addr, ADF_GEN4_PM_INTERRUPT, ADF_GEN4_PM_DRV_ACTIVE);

	/* Poll status register to make sure the device is powered up */
	ret = read_poll_timeout(ADF_CSR_RD, status,
				status & ADF_GEN4_PM_INIT_STATE,
				ADF_GEN4_PM_POLL_DELAY_US,
				ADF_GEN4_PM_POLL_TIMEOUT_US, true, addr,
				ADF_GEN4_PM_STATUS);
	if (ret)
		dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n");

	return ret;
}
EXPORT_SYMBOL_GPL(adf_gen4_init_device);

static inline void adf_gen4_unpack_ssm_wdtimer(u64 value, u32 *upper,
					       u32 *lower)
{
	*lower = lower_32_bits(value);
	*upper = upper_32_bits(value);
}

void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
{
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
	u64 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
	u64 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;
	u32 ssm_wdt_pke_high = 0;
	u32 ssm_wdt_pke_low = 0;
	u32 ssm_wdt_high = 0;
	u32 ssm_wdt_low = 0;

	/*
	 * Convert the 64-bit WDT timer values into 32-bit halves for the
	 * MMIO writes to the 32-bit CSRs.
	 */
	adf_gen4_unpack_ssm_wdtimer(timer_val, &ssm_wdt_high, &ssm_wdt_low);
	adf_gen4_unpack_ssm_wdtimer(timer_val_pke, &ssm_wdt_pke_high,
				    &ssm_wdt_pke_low);

	/* Enable WDT for sym and dc */
	ADF_CSR_WR(pmisc_addr, ADF_SSMWDTL_OFFSET, ssm_wdt_low);
	ADF_CSR_WR(pmisc_addr, ADF_SSMWDTH_OFFSET, ssm_wdt_high);
	/* Enable WDT for pke */
	ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEL_OFFSET, ssm_wdt_pke_low);
	ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKEH_OFFSET, ssm_wdt_pke_high);
}
EXPORT_SYMBOL_GPL(adf_gen4_set_ssm_wdtimer);

/*
 * The vector routing table is used to select the MSI-X entry to use for each
 * interrupt source.
 * The first ADF_GEN4_ETR_MAX_BANKS entries correspond to ring interrupts.
 * The final entry corresponds to VF2PF or error interrupts.
 * This vector table could be used to configure one MSI-X entry to be shared
 * between multiple interrupt sources.
 *
 * The default routing is set to have a one-to-one correspondence between the
 * interrupt source and the MSI-X entry used.
 */
void adf_gen4_set_msix_default_rttable(struct adf_accel_dev *accel_dev)
{
	void __iomem *csr;
	int i;

	csr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr;
	for (i = 0; i <= ADF_GEN4_ETR_MAX_BANKS; i++)
		ADF_CSR_WR(csr, ADF_GEN4_MSIX_RTTABLE_OFFSET(i), i);
}
EXPORT_SYMBOL_GPL(adf_gen4_set_msix_default_rttable);

int adf_pfvf_comms_disabled(struct adf_accel_dev *accel_dev)
{
	return 0;
}
EXPORT_SYMBOL_GPL(adf_pfvf_comms_disabled);

static int reset_ring_pair(void __iomem *csr, u32 bank_number)
{
	u32 status;
	int ret;

	/*
	 * Write BIT(0) of the rpresetctl register as 1. The rpresetctl
	 * registers have no other RW fields, so there is no need to preserve
	 * the values of the remaining bits; just write directly.
	 */
	ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETCTL(bank_number),
		   ADF_WQM_CSR_RPRESETCTL_RESET);

	/* Read rpresetsts register and wait for rp reset to complete */
	ret = read_poll_timeout(ADF_CSR_RD, status,
				status & ADF_WQM_CSR_RPRESETSTS_STATUS,
				ADF_RPRESET_POLL_DELAY_US,
				ADF_RPRESET_POLL_TIMEOUT_US, true,
				csr, ADF_WQM_CSR_RPRESETSTS(bank_number));
	if (!ret) {
		/* When rp reset is done, clear rpresetsts */
		ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETSTS(bank_number),
			   ADF_WQM_CSR_RPRESETSTS_STATUS);
	}

	return ret;
}

int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	void __iomem *csr = adf_get_etr_base(accel_dev);
	int ret;

	if (bank_number >= hw_data->num_banks)
		return -EINVAL;

	dev_dbg(&GET_DEV(accel_dev),
		"ring pair reset for bank:%d\n", bank_number);

	ret = reset_ring_pair(csr, bank_number);
	if (ret)
		dev_err(&GET_DEV(accel_dev),
			"ring pair reset failed (timeout)\n");
	else
		dev_dbg(&GET_DEV(accel_dev), "ring pair reset successful\n");

	return ret;
}
EXPORT_SYMBOL_GPL(adf_gen4_ring_pair_reset);

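/*
 * Static thread-to-arbiter mapping used when the device is configured for
 * data compression chaining (DCC): only AEs 4-7 carry a non-zero mapping
 * (threads 0-3 of each may use any arbiter); the final entry covers the
 * admin AE.
 */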
static const u32 thrd_to_arb_map_dcc[] = {
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x0
};

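/*
 * Per-group arbiter masks: 0x5 selects arbiters 0 and 2, 0xA selects
 * arbiters 1 and 3. One 4-bit mask is packed per AE thread below.
 */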
static const u16 rp_group_to_arb_mask[] = {
	[RP_GROUP_0] = 0x5,
	[RP_GROUP_1] = 0xA,
};

static bool is_single_service(int service_id)
{
	switch (service_id) {
	case SVC_DC:
	case SVC_SYM:
	case SVC_ASYM:
		return true;
	case SVC_CY:
	case SVC_CY2:
	case SVC_DCC:
	case SVC_ASYM_DC:
	case SVC_DC_ASYM:
	case SVC_SYM_DC:
	case SVC_DC_SYM:
	default:
		return false;
	}
}

int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	u32 *thd2arb_map = hw_data->thd_to_arb_map;
	unsigned int ae_cnt, worker_obj_cnt, i, j;
	unsigned long ae_mask, thds_mask;
	int srv_id, rp_group;
	u32 thd2arb_map_base;
	u16 arb_mask;

	if (!hw_data->get_rp_group || !hw_data->get_ena_thd_mask ||
	    !hw_data->get_num_aes || !hw_data->uof_get_num_objs ||
	    !hw_data->uof_get_ae_mask)
		return -EFAULT;

	srv_id = adf_get_service_enabled(accel_dev);
	if (srv_id < 0)
		return srv_id;

	ae_cnt = hw_data->get_num_aes(hw_data);
	worker_obj_cnt = hw_data->uof_get_num_objs(accel_dev) -
			 ADF_GEN4_ADMIN_ACCELENGINES;

	if (srv_id == SVC_DCC) {
		if (ae_cnt > ICP_QAT_HW_AE_DELIMITER)
			return -EINVAL;

		memcpy(thd2arb_map, thrd_to_arb_map_dcc,
		       array_size(sizeof(*thd2arb_map), ae_cnt));
		return 0;
	}

	for (i = 0; i < worker_obj_cnt; i++) {
		ae_mask = hw_data->uof_get_ae_mask(accel_dev, i);
		rp_group = hw_data->get_rp_group(accel_dev, ae_mask);
		thds_mask = hw_data->get_ena_thd_mask(accel_dev, i);
		thd2arb_map_base = 0;

		if (rp_group >= RP_GROUP_COUNT || rp_group < RP_GROUP_0)
			return -EINVAL;

		if (thds_mask == ADF_GEN4_ENA_THD_MASK_ERROR)
			return -EINVAL;

		if (is_single_service(srv_id))
			arb_mask = rp_group_to_arb_mask[RP_GROUP_0] |
				   rp_group_to_arb_mask[RP_GROUP_1];
		else
			arb_mask = rp_group_to_arb_mask[rp_group];

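		/* Pack one 4-bit arbiter mask per enabled thread */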
		for_each_set_bit(j, &thds_mask, ADF_NUM_THREADS_PER_AE)
			thd2arb_map_base |= arb_mask << (j * 4);

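		/* Apply the same mapping to every AE backing this object */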
		for_each_set_bit(j, &ae_mask, ae_cnt)
			thd2arb_map[j] = thd2arb_map_base;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(adf_gen4_init_thd2arb_map);

u16 adf_gen4_get_ring_to_svc_map(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	enum adf_cfg_service_type rps[RP_GROUP_COUNT] = { };
	unsigned int ae_mask, start_id, worker_obj_cnt, i;
	u16 ring_to_svc_map;
	int rp_group;

	if (!hw_data->get_rp_group || !hw_data->uof_get_ae_mask ||
	    !hw_data->uof_get_obj_type || !hw_data->uof_get_num_objs)
		return 0;

	/* If the service is DCC, all rings handle compression requests */
	if (adf_get_service_enabled(accel_dev) == SVC_DCC) {
		for (i = 0; i < RP_GROUP_COUNT; i++)
			rps[i] = COMP;
		goto set_mask;
	}

	worker_obj_cnt = hw_data->uof_get_num_objs(accel_dev) -
			 ADF_GEN4_ADMIN_ACCELENGINES;
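	/* Only the last RP_GROUP_COUNT worker objects define the group mapping */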
	start_id = worker_obj_cnt - RP_GROUP_COUNT;

	for (i = start_id; i < worker_obj_cnt; i++) {
		ae_mask = hw_data->uof_get_ae_mask(accel_dev, i);
		rp_group = hw_data->get_rp_group(accel_dev, ae_mask);
		if (rp_group >= RP_GROUP_COUNT || rp_group < RP_GROUP_0)
			return 0;

		switch (hw_data->uof_get_obj_type(accel_dev, i)) {
		case ADF_FW_SYM_OBJ:
			rps[rp_group] = SYM;
			break;
		case ADF_FW_ASYM_OBJ:
			rps[rp_group] = ASYM;
			break;
		case ADF_FW_DC_OBJ:
			rps[rp_group] = COMP;
			break;
		default:
			rps[rp_group] = 0;
			break;
		}
	}

set_mask:
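	/* Pack one service type per ring pair; pairs alternate between groups */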
	ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT |
			  rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT |
			  rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT |
			  rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT;

	return ring_to_svc_map;
}
EXPORT_SYMBOL_GPL(adf_gen4_get_ring_to_svc_map);

/**
 * adf_gen4_bank_quiesce_coal_timer() - quiesce bank coalesced interrupt timer
 * @accel_dev: Pointer to the device structure
 * @bank_idx: Offset to the bank within this device
 * @timeout_ms: Timeout in milliseconds for the operation
 *
 * This function tries to quiesce the coalesced interrupt timer of a bank if
 * it has been enabled and triggered.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_gen4_bank_quiesce_coal_timer(struct adf_accel_dev *accel_dev,
				     u32 bank_idx, int timeout_ms)
{
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
	void __iomem *csr_misc = adf_get_pmisc_base(accel_dev);
	void __iomem *csr_etr = adf_get_etr_base(accel_dev);
	u32 int_col_ctl, int_col_mask, int_col_en;
	u32 e_stat, intsrc;
	u64 wait_us;
	int ret;

	if (timeout_ms < 0)
		return -EINVAL;

	int_col_ctl = csr_ops->read_csr_int_col_ctl(csr_etr, bank_idx);
	int_col_mask = csr_ops->get_int_col_ctl_enable_mask();
	if (!(int_col_ctl & int_col_mask))
		return 0;

	int_col_en = csr_ops->read_csr_int_col_en(csr_etr, bank_idx);
	int_col_en &= BIT(ADF_WQM_CSR_RP_IDX_RX);

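	/* If the RX ring is empty, the timer cannot have been triggered */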
	e_stat = csr_ops->read_csr_e_stat(csr_etr, bank_idx);
	if (!(~e_stat & int_col_en))
		return 0;

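	/*
	 * The coalescing timer counts in 256 clock-cycle units; wait for up
	 * to twice the programmed period, capped at the caller's timeout.
	 */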
	wait_us = 2 * ((int_col_ctl & ~int_col_mask) << 8) * USEC_PER_SEC;
	do_div(wait_us, hw_data->clock_frequency);
	wait_us = min(wait_us, (u64)timeout_ms * USEC_PER_MSEC);
	dev_dbg(&GET_DEV(accel_dev),
		"wait for bank %d - coalesced timer expires in %llu us (max=%u ms estat=0x%x intcolen=0x%x)\n",
		bank_idx, wait_us, timeout_ms, e_stat, int_col_en);

	ret = read_poll_timeout(ADF_CSR_RD, intsrc, intsrc,
				ADF_COALESCED_POLL_DELAY_US, wait_us, true,
				csr_misc, ADF_WQM_CSR_RPINTSOU(bank_idx));
	if (ret)
		dev_warn(&GET_DEV(accel_dev),
			 "coalesced timer for bank %d expired (%llu us)\n",
			 bank_idx, wait_us);

	return ret;
}
EXPORT_SYMBOL_GPL(adf_gen4_bank_quiesce_coal_timer);

static int drain_bank(void __iomem *csr, u32 bank_number, int timeout_us)
{
	u32 status;

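	/* Request a ring-pair drain and poll rpresetsts for completion */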
	ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETCTL(bank_number),
		   ADF_WQM_CSR_RPRESETCTL_DRAIN);

	return read_poll_timeout(ADF_CSR_RD, status,
				 status & ADF_WQM_CSR_RPRESETSTS_STATUS,
				 ADF_RPRESET_POLL_DELAY_US, timeout_us, true,
				 csr, ADF_WQM_CSR_RPRESETSTS(bank_number));
}

void adf_gen4_bank_drain_finish(struct adf_accel_dev *accel_dev,
				u32 bank_number)
{
	void __iomem *csr = adf_get_etr_base(accel_dev);

	ADF_CSR_WR(csr, ADF_WQM_CSR_RPRESETSTS(bank_number),
		   ADF_WQM_CSR_RPRESETSTS_STATUS);
}

int adf_gen4_bank_drain_start(struct adf_accel_dev *accel_dev,
			      u32 bank_number, int timeout_us)
{
	void __iomem *csr = adf_get_etr_base(accel_dev);
	int ret;

	dev_dbg(&GET_DEV(accel_dev), "Drain bank %d\n", bank_number);

	ret = drain_bank(csr, bank_number, timeout_us);
	if (ret)
		dev_err(&GET_DEV(accel_dev), "Bank drain failed (timeout)\n");
	else
		dev_dbg(&GET_DEV(accel_dev), "Bank drain successful\n");

	return ret;
}

static void bank_state_save(struct adf_hw_csr_ops *ops, void __iomem *base,
			    u32 bank, struct bank_state *state, u32 num_rings)
{
	u32 i;

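	/* Snapshot all bank-level status and interrupt-control CSRs */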
	state->ringstat0 = ops->read_csr_stat(base, bank);
	state->ringuostat = ops->read_csr_uo_stat(base, bank);
	state->ringestat = ops->read_csr_e_stat(base, bank);
	state->ringnestat = ops->read_csr_ne_stat(base, bank);
	state->ringnfstat = ops->read_csr_nf_stat(base, bank);
	state->ringfstat = ops->read_csr_f_stat(base, bank);
	state->ringcstat0 = ops->read_csr_c_stat(base, bank);
	state->iaintflagen = ops->read_csr_int_en(base, bank);
	state->iaintflagreg = ops->read_csr_int_flag(base, bank);
	state->iaintflagsrcsel0 = ops->read_csr_int_srcsel(base, bank);
	state->iaintcolen = ops->read_csr_int_col_en(base, bank);
	state->iaintcolctl = ops->read_csr_int_col_ctl(base, bank);
	state->iaintflagandcolen = ops->read_csr_int_flag_and_col(base, bank);
	state->ringexpstat = ops->read_csr_exp_stat(base, bank);
	state->ringexpintenable = ops->read_csr_exp_int_en(base, bank);
	state->ringsrvarben = ops->read_csr_ring_srv_arb_en(base, bank);

	for (i = 0; i < num_rings; i++) {
		state->rings[i].head = ops->read_csr_ring_head(base, bank, i);
		state->rings[i].tail = ops->read_csr_ring_tail(base, bank, i);
		state->rings[i].config = ops->read_csr_ring_config(base, bank, i);
		state->rings[i].base = ops->read_csr_ring_base(base, bank, i);
	}
}

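/*
 * Compare a live ring CSR against its saved value; log and return -EINVAL
 * on mismatch, 0 on match.
 */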
#define CHECK_STAT(op, expect_val, name, args...) \
({ \
	u32 __expect_val = (expect_val); \
	u32 actual_val = op(args); \
	(__expect_val == actual_val) ? 0 : \
		(pr_err("QAT: Failed to restore %s register. Expected 0x%x, actual 0x%x\n", \
			name, __expect_val, actual_val), -EINVAL); \
})

static int bank_state_restore(struct adf_hw_csr_ops *ops, void __iomem *base,
			      u32 bank, struct bank_state *state, u32 num_rings,
			      int tx_rx_gap)
{
	u32 val, tmp_val, i;
	int ret;

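	/* Restore ring bases and configs before replaying head/tail pointers */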
	for (i = 0; i < num_rings; i++)
		ops->write_csr_ring_base(base, bank, i, state->rings[i].base);

	for (i = 0; i < num_rings; i++)
		ops->write_csr_ring_config(base, bank, i, state->rings[i].config);

	for (i = 0; i < num_rings / 2; i++) {
		int tx = i * (tx_rx_gap + 1);
		int rx = tx + tx_rx_gap;

		ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head);
		ops->write_csr_ring_tail(base, bank, tx, state->rings[tx].tail);

		/*
		 * The TX ring head needs to be updated again to make sure
		 * that the HW does not consider the ring full when it is
		 * empty, and that the state flags are set to match the
		 * recovered state.
		 */
		if (state->ringestat & BIT(tx)) {
			val = ops->read_csr_int_srcsel(base, bank);
			val |= ADF_RP_INT_SRC_SEL_F_RISE_MASK;
			ops->write_csr_int_srcsel_w_val(base, bank, val);
			ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head);
		}

		ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail);
		val = ops->read_csr_int_srcsel(base, bank);
		val |= ADF_RP_INT_SRC_SEL_F_RISE_MASK << ADF_RP_INT_SRC_SEL_RANGE_WIDTH;
		ops->write_csr_int_srcsel_w_val(base, bank, val);

		ops->write_csr_ring_head(base, bank, rx, state->rings[rx].head);
		val = ops->read_csr_int_srcsel(base, bank);
		val |= ADF_RP_INT_SRC_SEL_F_FALL_MASK << ADF_RP_INT_SRC_SEL_RANGE_WIDTH;
		ops->write_csr_int_srcsel_w_val(base, bank, val);

		/*
		 * The RX ring tail needs to be updated again to make sure
		 * that the HW does not consider the ring empty when it is
		 * full, and that the state flags are set to match the
		 * recovered state.
		 */
		if (state->ringfstat & BIT(rx))
			ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail);
	}

	ops->write_csr_int_flag_and_col(base, bank, state->iaintflagandcolen);
	ops->write_csr_int_en(base, bank, state->iaintflagen);
	ops->write_csr_int_col_en(base, bank, state->iaintcolen);
	ops->write_csr_int_srcsel_w_val(base, bank, state->iaintflagsrcsel0);
	ops->write_csr_exp_int_en(base, bank, state->ringexpintenable);
	ops->write_csr_int_col_ctl(base, bank, state->iaintcolctl);
	ops->write_csr_ring_srv_arb_en(base, bank, state->ringsrvarben);

	/* Check that all ring statuses match the saved state. */
	ret = CHECK_STAT(ops->read_csr_stat, state->ringstat0, "ringstat",
			 base, bank);
	if (ret)
		return ret;

	ret = CHECK_STAT(ops->read_csr_e_stat, state->ringestat, "ringestat",
			 base, bank);
	if (ret)
		return ret;

	ret = CHECK_STAT(ops->read_csr_ne_stat, state->ringnestat, "ringnestat",
			 base, bank);
	if (ret)
		return ret;

	ret = CHECK_STAT(ops->read_csr_nf_stat, state->ringnfstat, "ringnfstat",
			 base, bank);
	if (ret)
		return ret;

	ret = CHECK_STAT(ops->read_csr_f_stat, state->ringfstat, "ringfstat",
			 base, bank);
	if (ret)
		return ret;

	ret = CHECK_STAT(ops->read_csr_c_stat, state->ringcstat0, "ringcstat",
			 base, bank);
	if (ret)
		return ret;

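	/* A live exception that was not present in the saved state is fatal */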
	tmp_val = ops->read_csr_exp_stat(base, bank);
	val = state->ringexpstat;
	if (tmp_val && !val) {
		pr_err("QAT: Bank was restored with exception: 0x%x\n", tmp_val);
		return -EINVAL;
	}

	return 0;
}

int adf_gen4_bank_state_save(struct adf_accel_dev *accel_dev, u32 bank_number,
			     struct bank_state *state)
{
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
	void __iomem *csr_base = adf_get_etr_base(accel_dev);

	if (bank_number >= hw_data->num_banks || !state)
		return -EINVAL;

	dev_dbg(&GET_DEV(accel_dev), "Saving state of bank %d\n", bank_number);

	bank_state_save(csr_ops, csr_base, bank_number, state,
			hw_data->num_rings_per_bank);

	return 0;
}
EXPORT_SYMBOL_GPL(adf_gen4_bank_state_save);

int adf_gen4_bank_state_restore(struct adf_accel_dev *accel_dev, u32 bank_number,
				struct bank_state *state)
{
	struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev);
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
	void __iomem *csr_base = adf_get_etr_base(accel_dev);
	int ret;

	if (bank_number >= hw_data->num_banks || !state)
		return -EINVAL;

	dev_dbg(&GET_DEV(accel_dev), "Restoring state of bank %d\n", bank_number);

	ret = bank_state_restore(csr_ops, csr_base, bank_number, state,
				 hw_data->num_rings_per_bank, hw_data->tx_rx_gap);
	if (ret)
		dev_err(&GET_DEV(accel_dev),
			"Unable to restore state of bank %d\n", bank_number);

	return ret;
}
EXPORT_SYMBOL_GPL(adf_gen4_bank_state_restore);