xref: /freebsd/sys/dev/qat/qat_hw/qat_c4xxx/adf_c4xxx_hw_data.c (revision 71625ec9ad2a9bc8c09784fbd23b759830e0ee5f)
1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /* Copyright(c) 2007-2022 Intel Corporation */
3 #include <linux/atomic.h>
4 #include <linux/compiler.h>
5 #include <adf_accel_devices.h>
6 #include <adf_common_drv.h>
7 #include <adf_pfvf_msg.h>
8 #include <adf_dev_err.h>
9 #include <adf_cfg.h>
10 #include <adf_fw_counters.h>
11 #include <adf_gen2_hw_data.h>
12 #include <adf_gen2_pfvf.h>
13 #include "adf_c4xxx_hw_data.h"
14 #include "adf_c4xxx_reset.h"
15 #include "adf_c4xxx_inline.h"
16 #include "adf_c4xxx_ras.h"
17 #include "adf_c4xxx_misc_error_stats.h"
18 #include "adf_c4xxx_pke_replay_stats.h"
19 #include "adf_heartbeat.h"
20 #include "icp_qat_fw_init_admin.h"
21 #include "icp_qat_hw.h"
22 
23 /* accel unit information */
24 static struct adf_accel_unit adf_c4xxx_au_32_ae[] =
25     { { 0x1, 0x3, 0x3F, 0x1B, 6, ADF_ACCEL_SERVICE_NULL },
26       { 0x2, 0xC, 0xFC0, 0x6C0, 6, ADF_ACCEL_SERVICE_NULL },
27       { 0x4, 0x30, 0xF000, 0xF000, 4, ADF_ACCEL_SERVICE_NULL },
28       { 0x8, 0xC0, 0x3F0000, 0x1B0000, 6, ADF_ACCEL_SERVICE_NULL },
29       { 0x10, 0x300, 0xFC00000, 0x6C00000, 6, ADF_ACCEL_SERVICE_NULL },
30       { 0x20, 0xC00, 0xF0000000, 0xF0000000, 4, ADF_ACCEL_SERVICE_NULL } };
31 
32 static struct adf_accel_unit adf_c4xxx_au_24_ae[] = {
33 	{ 0x1, 0x3, 0x3F, 0x1B, 6, ADF_ACCEL_SERVICE_NULL },
34 	{ 0x2, 0xC, 0xFC0, 0x6C0, 6, ADF_ACCEL_SERVICE_NULL },
35 	{ 0x8, 0xC0, 0x3F0000, 0x1B0000, 6, ADF_ACCEL_SERVICE_NULL },
36 	{ 0x10, 0x300, 0xFC00000, 0x6C00000, 6, ADF_ACCEL_SERVICE_NULL },
37 };
38 
39 static struct adf_accel_unit adf_c4xxx_au_12_ae[] = {
40 	{ 0x1, 0x3, 0x3F, 0x1B, 6, ADF_ACCEL_SERVICE_NULL },
41 	{ 0x8, 0xC0, 0x3F0000, 0x1B0000, 6, ADF_ACCEL_SERVICE_NULL },
42 };
43 
44 static struct adf_accel_unit adf_c4xxx_au_emulation[] =
45     { { 0x1, 0x3, 0x3F, 0x1B, 6, ADF_ACCEL_SERVICE_NULL },
46       { 0x2, 0xC, 0xC0, 0xC0, 2, ADF_ACCEL_SERVICE_NULL } };
47 
48 /* Accel engine threads for each of the following services
49  * <num_asym_thd> , <num_sym_thd> , <num_dc_thd>,
50  */
51 
52 /* Thread mapping for SKU capable of symmetric cryptography */
53 static const struct adf_ae_info adf_c4xxx_32_ae_sym[] =
54     { { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 },
55       { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 },
56       { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 2, 6, 3 },
57       { 2, 6, 3 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 },
58       { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 },
59       { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 },
60       { 2, 6, 3 }, { 2, 6, 3 } };
61 
62 static const struct adf_ae_info adf_c4xxx_24_ae_sym[] =
63     { { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 },
64       { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 },
65       { 2, 6, 3 }, { 1, 7, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
66       { 0, 0, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 },
67       { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 },
68       { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
69       { 0, 0, 0 }, { 0, 0, 0 } };
70 
71 static const struct adf_ae_info adf_c4xxx_12_ae_sym[] =
72     { { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 }, { 2, 6, 3 },
73       { 1, 7, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
74       { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
75       { 0, 0, 0 }, { 2, 6, 3 }, { 2, 6, 3 }, { 1, 7, 0 }, { 2, 6, 3 },
76       { 2, 6, 3 }, { 1, 7, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
77       { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
78       { 0, 0, 0 }, { 0, 0, 0 } };
79 
80 /* Thread mapping for SKU capable of asymmetric and symmetric cryptography */
81 static const struct adf_ae_info adf_c4xxx_32_ae[] =
82     { { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 },
83       { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 },
84       { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 2, 5, 3 },
85       { 2, 5, 3 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 },
86       { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 },
87       { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 },
88       { 2, 5, 3 }, { 2, 5, 3 } };
89 
90 static const struct adf_ae_info adf_c4xxx_24_ae[] =
91     { { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 },
92       { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 },
93       { 2, 5, 3 }, { 1, 6, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
94       { 0, 0, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 },
95       { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 },
96       { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
97       { 0, 0, 0 }, { 0, 0, 0 } };
98 
99 static const struct adf_ae_info adf_c4xxx_12_ae[] =
100     { { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 }, { 2, 5, 3 },
101       { 1, 6, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
102       { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
103       { 0, 0, 0 }, { 2, 5, 3 }, { 2, 5, 3 }, { 1, 6, 0 }, { 2, 5, 3 },
104       { 2, 5, 3 }, { 1, 6, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
105       { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 },
106       { 0, 0, 0 }, { 0, 0, 0 } };
107 
108 static struct adf_hw_device_class c4xxx_class = {.name = ADF_C4XXX_DEVICE_NAME,
109 						 .type = DEV_C4XXX,
110 						 .instances = 0 };
111 
112 struct icp_qat_fw_init_c4xxx_admin_hb_stats {
113 	struct icp_qat_fw_init_admin_hb_cnt stats[ADF_NUM_THREADS_PER_AE];
114 };
115 
116 struct adf_hb_count {
117 	u16 ae_thread[ADF_NUM_THREADS_PER_AE];
118 };
119 
120 static const int sku_cy_au[] = ADF_C4XXX_NUM_CY_AU;
121 static const int sku_dc_au[] = ADF_C4XXX_NUM_DC_AU;
122 static const int sku_inline_au[] = ADF_C4XXX_NUM_INLINE_AU;
123 
124 /*
125  * C4xxx devices introduce new fuses and soft straps and
126  * are different from previous gen device implementations.
127  */
128 
129 static u32
get_accel_mask(struct adf_accel_dev * accel_dev)130 get_accel_mask(struct adf_accel_dev *accel_dev)
131 {
132 	device_t pdev = accel_dev->accel_pci_dev.pci_dev;
133 	u32 fusectl0;
134 	u32 softstrappull0;
135 
136 	fusectl0 = pci_read_config(pdev, ADF_C4XXX_FUSECTL0_OFFSET, 4);
137 	softstrappull0 =
138 	    pci_read_config(pdev, ADF_C4XXX_SOFTSTRAPPULL0_OFFSET, 4);
139 
140 	return (~(fusectl0 | softstrappull0)) & ADF_C4XXX_ACCELERATORS_MASK;
141 }
142 
143 static u32
get_ae_mask(struct adf_accel_dev * accel_dev)144 get_ae_mask(struct adf_accel_dev *accel_dev)
145 {
146 	device_t pdev = accel_dev->accel_pci_dev.pci_dev;
147 	u32 fusectl1;
148 	u32 softstrappull1;
149 
150 	fusectl1 = pci_read_config(pdev, ADF_C4XXX_FUSECTL1_OFFSET, 4);
151 	softstrappull1 =
152 	    pci_read_config(pdev, ADF_C4XXX_SOFTSTRAPPULL1_OFFSET, 4);
153 
154 	/* Assume that AE and AU disable masks are consistent, so no
155 	 * checks against the AU mask are performed
156 	 */
157 	return (~(fusectl1 | softstrappull1)) & ADF_C4XXX_ACCELENGINES_MASK;
158 }
159 
160 static u32
get_num_accels(struct adf_hw_device_data * self)161 get_num_accels(struct adf_hw_device_data *self)
162 {
163 	return self ? hweight32(self->accel_mask) : 0;
164 }
165 
166 static u32
get_num_aes(struct adf_hw_device_data * self)167 get_num_aes(struct adf_hw_device_data *self)
168 {
169 	return self ? hweight32(self->ae_mask) : 0;
170 }
171 
172 static u32
get_misc_bar_id(struct adf_hw_device_data * self)173 get_misc_bar_id(struct adf_hw_device_data *self)
174 {
175 	return ADF_C4XXX_PMISC_BAR;
176 }
177 
178 static u32
get_etr_bar_id(struct adf_hw_device_data * self)179 get_etr_bar_id(struct adf_hw_device_data *self)
180 {
181 	return ADF_C4XXX_ETR_BAR;
182 }
183 
184 static u32
get_sram_bar_id(struct adf_hw_device_data * self)185 get_sram_bar_id(struct adf_hw_device_data *self)
186 {
187 	return ADF_C4XXX_SRAM_BAR;
188 }
189 
190 static inline void
c4xxx_unpack_ssm_wdtimer(u64 value,u32 * upper,u32 * lower)191 c4xxx_unpack_ssm_wdtimer(u64 value, u32 *upper, u32 *lower)
192 {
193 	*lower = lower_32_bits(value);
194 	*upper = upper_32_bits(value);
195 }
196 
197 /**
198  * c4xxx_set_ssm_wdtimer() - Initialize the slice hang watchdog timer.
199  *
200  * @param accel_dev    Structure holding accelerator data.
201  * @return 0 on success, error code otherwise.
202  */
203 static int
c4xxx_set_ssm_wdtimer(struct adf_accel_dev * accel_dev)204 c4xxx_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
205 {
206 	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
207 	struct adf_bar *misc_bar =
208 	    &GET_BARS(accel_dev)[hw_device->get_misc_bar_id(hw_device)];
209 	struct resource *csr = misc_bar->virt_addr;
210 	unsigned long accel_mask = hw_device->accel_mask;
211 	u32 accel = 0;
212 	u64 timer_val = ADF_C4XXX_SSM_WDT_64BIT_DEFAULT_VALUE;
213 	u64 timer_val_pke = ADF_C4XXX_SSM_WDT_PKE_64BIT_DEFAULT_VALUE;
214 	u32 ssm_wdt_low = 0, ssm_wdt_high = 0;
215 	u32 ssm_wdt_pke_low = 0, ssm_wdt_pke_high = 0;
216 
217 	/* Convert 64bit Slice Hang watchdog value into 32bit values for
218 	 * mmio write to 32bit CSRs.
219 	 */
220 	c4xxx_unpack_ssm_wdtimer(timer_val, &ssm_wdt_high, &ssm_wdt_low);
221 	c4xxx_unpack_ssm_wdtimer(timer_val_pke,
222 				 &ssm_wdt_pke_high,
223 				 &ssm_wdt_pke_low);
224 
225 	/* Configures Slice Hang watchdogs */
226 	for_each_set_bit(accel, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS)
227 	{
228 		ADF_CSR_WR(csr, ADF_C4XXX_SSMWDTL_OFFSET(accel), ssm_wdt_low);
229 		ADF_CSR_WR(csr, ADF_C4XXX_SSMWDTH_OFFSET(accel), ssm_wdt_high);
230 		ADF_CSR_WR(csr,
231 			   ADF_C4XXX_SSMWDTPKEL_OFFSET(accel),
232 			   ssm_wdt_pke_low);
233 		ADF_CSR_WR(csr,
234 			   ADF_C4XXX_SSMWDTPKEH_OFFSET(accel),
235 			   ssm_wdt_pke_high);
236 	}
237 
238 	return 0;
239 }
240 
241 /**
242  * c4xxx_check_slice_hang() - Check slice hang status
243  *
244  * Return: true if a slice hange interrupt is serviced..
245  */
246 static bool
c4xxx_check_slice_hang(struct adf_accel_dev * accel_dev)247 c4xxx_check_slice_hang(struct adf_accel_dev *accel_dev)
248 {
249 	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
250 	struct adf_bar *misc_bar =
251 	    &GET_BARS(accel_dev)[hw_device->get_misc_bar_id(hw_device)];
252 	struct resource *csr = misc_bar->virt_addr;
253 	u32 slice_hang_offset;
254 	u32 ia_slice_hang_offset;
255 	u32 fw_irq_source;
256 	u32 ia_irq_source;
257 	u32 accel_num = 0;
258 	bool handled = false;
259 	u32 errsou10 = ADF_CSR_RD(csr, ADF_C4XXX_ERRSOU10);
260 	unsigned long accel_mask;
261 
262 	accel_mask = hw_device->accel_mask;
263 
264 	for_each_set_bit(accel_num, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS)
265 	{
266 		if (!(errsou10 & ADF_C4XXX_IRQ_SRC_MASK(accel_num)))
267 			continue;
268 
269 		fw_irq_source = ADF_CSR_RD(csr, ADF_INTSTATSSM(accel_num));
270 		ia_irq_source =
271 		    ADF_CSR_RD(csr, ADF_C4XXX_IAINTSTATSSM(accel_num));
272 		ia_slice_hang_offset =
273 		    ADF_C4XXX_IASLICEHANGSTATUS_OFFSET(accel_num);
274 
275 		/* FW did not clear SliceHang error, IA logs and clears
276 		 * the error
277 		 */
278 		if ((fw_irq_source & ADF_INTSTATSSM_SHANGERR) &&
279 		    (ia_irq_source & ADF_INTSTATSSM_SHANGERR)) {
280 			slice_hang_offset =
281 			    ADF_C4XXX_SLICEHANGSTATUS_OFFSET(accel_num);
282 
283 			/* Bring hung slice out of reset */
284 			adf_csr_fetch_and_and(csr, slice_hang_offset, ~0);
285 
286 			/* Log SliceHang error and clear an interrupt */
287 			handled = adf_handle_slice_hang(accel_dev,
288 							accel_num,
289 							csr,
290 							ia_slice_hang_offset);
291 			atomic_inc(&accel_dev->ras_counters[ADF_RAS_UNCORR]);
292 		}
293 		/* FW cleared SliceHang, IA only logs an error */
294 		else if (!(fw_irq_source & ADF_INTSTATSSM_SHANGERR) &&
295 			 (ia_irq_source & ADF_INTSTATSSM_SHANGERR)) {
296 			/* Log SliceHang error and clear an interrupt */
297 			handled = adf_handle_slice_hang(accel_dev,
298 							accel_num,
299 							csr,
300 							ia_slice_hang_offset);
301 
302 			atomic_inc(&accel_dev->ras_counters[ADF_RAS_UNCORR]);
303 		}
304 
305 		/* Clear the associated IA interrupt */
306 		adf_csr_fetch_and_and(csr,
307 				      ADF_C4XXX_IAINTSTATSSM(accel_num),
308 				      ~BIT(13));
309 	}
310 
311 	return handled;
312 }
313 
314 static bool
get_eth_doorbell_msg(struct adf_accel_dev * accel_dev)315 get_eth_doorbell_msg(struct adf_accel_dev *accel_dev)
316 {
317 	struct resource *csr =
318 	    (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr;
319 	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
320 	u32 errsou11 = ADF_CSR_RD(csr, ADF_C4XXX_ERRSOU11);
321 	u32 doorbell_int = ADF_CSR_RD(csr, ADF_C4XXX_ETH_DOORBELL_INT);
322 	u32 eth_doorbell_reg[ADF_C4XXX_NUM_ETH_DOORBELL_REGS];
323 	bool handled = false;
324 	u32 data_reg;
325 	u8 i;
326 
327 	/* Reset cannot be acknowledged until the reset */
328 	hw_device->reset_ack = false;
329 
330 	/* Check if doorbell interrupt occurred. */
331 	if (errsou11 & ADF_C4XXX_DOORBELL_INT_SRC) {
332 		/* Decode doorbell messages from ethernet device */
333 		for (i = 0; i < ADF_C4XXX_NUM_ETH_DOORBELL_REGS; i++) {
334 			eth_doorbell_reg[i] = 0;
335 			if (doorbell_int & BIT(i)) {
336 				data_reg = ADF_C4XXX_ETH_DOORBELL(i);
337 				eth_doorbell_reg[i] = ADF_CSR_RD(csr, data_reg);
338 				device_printf(
339 				    GET_DEV(accel_dev),
340 				    "Receives Doorbell message(0x%08x)\n",
341 				    eth_doorbell_reg[i]);
342 			}
343 		}
344 		/* Only need to check PF0 */
345 		if (eth_doorbell_reg[0] == ADF_C4XXX_IOSFSB_RESET_ACK) {
346 			device_printf(GET_DEV(accel_dev),
347 				      "Receives pending reset ACK\n");
348 			hw_device->reset_ack = true;
349 		}
350 		/* Clear the interrupt source */
351 		ADF_CSR_WR(csr,
352 			   ADF_C4XXX_ETH_DOORBELL_INT,
353 			   ADF_C4XXX_ETH_DOORBELL_MASK);
354 		handled = true;
355 	}
356 
357 	return handled;
358 }
359 
360 static enum dev_sku_info
get_sku(struct adf_hw_device_data * self)361 get_sku(struct adf_hw_device_data *self)
362 {
363 	int aes = get_num_aes(self);
364 	u32 capabilities = self->accel_capabilities_mask;
365 	bool sym_only_sku = false;
366 
367 	/* Check if SKU is capable only of symmetric cryptography
368 	 * via device capabilities.
369 	 */
370 	if ((capabilities & ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC) &&
371 	    !(capabilities & ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC) &&
372 	    !(capabilities & ADF_ACCEL_CAPABILITIES_COMPRESSION))
373 		sym_only_sku = true;
374 
375 	switch (aes) {
376 	case ADF_C4XXX_HIGH_SKU_AES:
377 		if (sym_only_sku)
378 			return DEV_SKU_1_CY;
379 		return DEV_SKU_1;
380 	case ADF_C4XXX_MED_SKU_AES:
381 		if (sym_only_sku)
382 			return DEV_SKU_2_CY;
383 		return DEV_SKU_2;
384 	case ADF_C4XXX_LOW_SKU_AES:
385 		if (sym_only_sku)
386 			return DEV_SKU_3_CY;
387 		return DEV_SKU_3;
388 	};
389 
390 	return DEV_SKU_UNKNOWN;
391 }
392 
393 static bool
c4xxx_check_prod_sku(struct adf_accel_dev * accel_dev)394 c4xxx_check_prod_sku(struct adf_accel_dev *accel_dev)
395 {
396 	device_t pdev = accel_dev->accel_pci_dev.pci_dev;
397 	u32 fusectl0 = 0;
398 
399 	fusectl0 = pci_read_config(pdev, ADF_C4XXX_FUSECTL0_OFFSET, 4);
400 
401 	if (fusectl0 & ADF_C4XXX_FUSE_PROD_SKU_MASK)
402 		return true;
403 	else
404 		return false;
405 }
406 
407 static bool
adf_check_sym_only_sku_c4xxx(struct adf_accel_dev * accel_dev)408 adf_check_sym_only_sku_c4xxx(struct adf_accel_dev *accel_dev)
409 {
410 	device_t pdev = accel_dev->accel_pci_dev.pci_dev;
411 	u32 legfuse = 0;
412 
413 	legfuse = pci_read_config(pdev, ADF_DEVICE_LEGFUSE_OFFSET, 4);
414 
415 	if (legfuse & ADF_C4XXX_LEGFUSE_BASE_SKU_MASK)
416 		return true;
417 	else
418 		return false;
419 }
420 
421 static void
adf_enable_slice_hang_detection(struct adf_accel_dev * accel_dev)422 adf_enable_slice_hang_detection(struct adf_accel_dev *accel_dev)
423 {
424 	struct resource *csr;
425 	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
426 	u32 accel = 0;
427 	unsigned long accel_mask;
428 
429 	csr = (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr;
430 	accel_mask = hw_device->accel_mask;
431 
432 	for_each_set_bit(accel, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS)
433 	{
434 		/* Unmasks Slice Hang interrupts so they can be seen by IA. */
435 		ADF_CSR_WR(csr,
436 			   ADF_C4XXX_SHINTMASKSSM_OFFSET(accel),
437 			   ADF_C4XXX_SHINTMASKSSM_VAL);
438 	}
439 }
440 
441 static void
adf_enable_ras(struct adf_accel_dev * accel_dev)442 adf_enable_ras(struct adf_accel_dev *accel_dev)
443 {
444 	struct resource *csr;
445 	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
446 	u32 accel = 0;
447 	unsigned long accel_mask;
448 
449 	csr = (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr;
450 	accel_mask = hw_device->accel_mask;
451 
452 	for_each_set_bit(accel, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS)
453 	{
454 		ADF_CSR_WR(csr,
455 			   ADF_C4XXX_GET_SSMFEATREN_OFFSET(accel),
456 			   ADF_C4XXX_SSMFEATREN_VAL);
457 	}
458 }
459 
460 static u32
get_clock_speed(struct adf_hw_device_data * self)461 get_clock_speed(struct adf_hw_device_data *self)
462 {
463 	/* c4xxx CPP clock is equal to high-speed clock */
464 	return self->clock_frequency;
465 }
466 
467 static void
adf_enable_error_interrupts(struct adf_accel_dev * accel_dev)468 adf_enable_error_interrupts(struct adf_accel_dev *accel_dev)
469 {
470 	struct resource *csr, *aram_csr;
471 	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
472 	u32 accel = 0;
473 	unsigned long accel_mask;
474 
475 	csr = (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr;
476 	aram_csr = (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr;
477 	accel_mask = hw_device->accel_mask;
478 
479 	for_each_set_bit(accel, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS)
480 	{
481 		/* Enable shared memory, MMP, CPP, PPERR interrupts
482 		 * for a given accel
483 		 */
484 		ADF_CSR_WR(csr, ADF_C4XXX_GET_INTMASKSSM_OFFSET(accel), 0);
485 
486 		/* Enable SPP parity error interrupts for a given accel */
487 		ADF_CSR_WR(csr, ADF_C4XXX_GET_SPPPARERRMSK_OFFSET(accel), 0);
488 
489 		/* Enable ssm soft parity errors on given accel */
490 		ADF_CSR_WR(csr,
491 			   ADF_C4XXX_GET_SSMSOFTERRORPARITY_MASK_OFFSET(accel),
492 			   ADF_C4XXX_SSMSOFTERRORPARITY_MASK_VAL);
493 	}
494 
495 	/* Enable interrupts for VFtoPF0_127. */
496 	ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK4, ADF_C4XXX_VF2PF0_31);
497 	ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK5, ADF_C4XXX_VF2PF32_63);
498 	ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK6, ADF_C4XXX_VF2PF64_95);
499 	ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK7, ADF_C4XXX_VF2PF96_127);
500 
501 	/* Enable interrupts signaling ECC correctable errors for all AEs */
502 	ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK8, ADF_C4XXX_ERRMSK8_COERR);
503 	ADF_CSR_WR(csr,
504 		   ADF_C4XXX_HI_ME_COR_ERRLOG_ENABLE,
505 		   ADF_C4XXX_HI_ME_COR_ERRLOG_ENABLE_MASK);
506 
507 	/* Enable error interrupts reported by ERRSOU9 */
508 	ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK9, ADF_C4XXX_ERRMSK9_IRQ_MASK);
509 
510 	/* Enable uncorrectable errors on all the AE */
511 	ADF_CSR_WR(csr,
512 		   ADF_C4XXX_HI_ME_UNCERR_LOG_ENABLE,
513 		   ADF_C4XXX_HI_ME_UNCERR_LOG_ENABLE_MASK);
514 
515 	/* Enable CPP Agent to report command parity errors */
516 	ADF_CSR_WR(csr,
517 		   ADF_C4XXX_HI_CPP_AGENT_CMD_PAR_ERR_LOG_ENABLE,
518 		   ADF_C4XXX_HI_CPP_AGENT_CMD_PAR_ERR_LOG_ENABLE_MASK);
519 
520 	/* Enable reporting of RI memory parity errors */
521 	ADF_CSR_WR(csr,
522 		   ADF_C4XXX_RI_MEM_PAR_ERR_EN0,
523 		   ADF_C4XXX_RI_MEM_PAR_ERR_EN0_MASK);
524 
525 	/* Enable reporting of TI memory parity errors */
526 	ADF_CSR_WR(csr,
527 		   ADF_C4XXX_TI_MEM_PAR_ERR_EN0,
528 		   ADF_C4XXX_TI_MEM_PAR_ERR_EN0_MASK);
529 	ADF_CSR_WR(csr,
530 		   ADF_C4XXX_TI_MEM_PAR_ERR_EN1,
531 		   ADF_C4XXX_TI_MEM_PAR_ERR_EN1_MASK);
532 
533 	/* Enable SSM errors */
534 	ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK10, ADF_C4XXX_ERRMSK10_SSM_ERR);
535 
536 	/* Enable miscellaneous errors (ethernet doorbell aram, ici, ice) */
537 	ADF_CSR_WR(csr, ADF_C4XXX_ERRMSK11, ADF_C4XXX_ERRMSK11_ERR);
538 
539 	/* RI CPP bus interface error detection and reporting. */
540 	ADF_CSR_WR(csr, ADF_C4XXX_RICPPINTCTL, ADF_C4XXX_RICPP_EN);
541 
542 	/* TI CPP bus interface error detection and reporting. */
543 	ADF_CSR_WR(csr, ADF_C4XXX_TICPPINTCTL, ADF_C4XXX_TICPP_EN);
544 
545 	/* Enable CFC Error interrupts and logging. */
546 	ADF_CSR_WR(csr, ADF_C4XXX_CPP_CFC_ERR_CTRL, ADF_C4XXX_CPP_CFC_UE);
547 
548 	/* Enable ARAM correctable error detection. */
549 	ADF_CSR_WR(aram_csr, ADF_C4XXX_ARAMCERR, ADF_C4XXX_ARAM_CERR);
550 
551 	/* Enable ARAM uncorrectable error detection. */
552 	ADF_CSR_WR(aram_csr, ADF_C4XXX_ARAMUERR, ADF_C4XXX_ARAM_UERR);
553 
554 	/* Enable Push/Pull Misc Uncorrectable error interrupts and logging */
555 	ADF_CSR_WR(aram_csr, ADF_C4XXX_CPPMEMTGTERR, ADF_C4XXX_TGT_UERR);
556 }
557 
558 static void
adf_enable_mmp_error_correction(struct resource * csr,struct adf_hw_device_data * hw_data)559 adf_enable_mmp_error_correction(struct resource *csr,
560 				struct adf_hw_device_data *hw_data)
561 {
562 	unsigned int accel = 0, mmp;
563 	unsigned long uerrssmmmp_mask, cerrssmmmp_mask;
564 	enum operation op;
565 	unsigned long accel_mask;
566 
567 	/* Prepare values and operation that will be performed on
568 	 * UERRSSMMMP and CERRSSMMMP registers on each MMP
569 	 */
570 	if (hw_data->accel_capabilities_mask &
571 	    ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC) {
572 		uerrssmmmp_mask = ADF_C4XXX_UERRSSMMMP_EN;
573 		cerrssmmmp_mask = ADF_C4XXX_CERRSSMMMP_EN;
574 		op = OR;
575 	} else {
576 		uerrssmmmp_mask = ~ADF_C4XXX_UERRSSMMMP_EN;
577 		cerrssmmmp_mask = ~ADF_C4XXX_CERRSSMMMP_EN;
578 		op = AND;
579 	}
580 
581 	accel_mask = hw_data->accel_mask;
582 
583 	/* Enable MMP Logging */
584 	for_each_set_bit(accel, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS)
585 	{
586 		/* Set power-up */
587 		adf_csr_fetch_and_and(csr,
588 				      ADF_C4XXX_SLICEPWRDOWN(accel),
589 				      ~ADF_C4XXX_MMP_PWR_UP_MSK);
590 
591 		for (mmp = 0; mmp < ADF_C4XXX_MAX_MMP; ++mmp) {
592 			adf_csr_fetch_and_update(op,
593 						 csr,
594 						 ADF_C4XXX_UERRSSMMMP(accel,
595 								      mmp),
596 						 uerrssmmmp_mask);
597 			adf_csr_fetch_and_update(op,
598 						 csr,
599 						 ADF_C4XXX_CERRSSMMMP(accel,
600 								      mmp),
601 						 cerrssmmmp_mask);
602 		}
603 
604 		/* Restore power-down value */
605 		adf_csr_fetch_and_or(csr,
606 				     ADF_C4XXX_SLICEPWRDOWN(accel),
607 				     ADF_C4XXX_MMP_PWR_UP_MSK);
608 	}
609 }
610 
611 static void
get_arb_info(struct arb_info * arb_csrs_info)612 get_arb_info(struct arb_info *arb_csrs_info)
613 {
614 	arb_csrs_info->arbiter_offset = ADF_C4XXX_ARB_OFFSET;
615 	arb_csrs_info->wrk_cfg_offset = ADF_C4XXX_ARB_WQCFG_OFFSET;
616 }
617 
618 static void
get_admin_info(struct admin_info * admin_csrs_info)619 get_admin_info(struct admin_info *admin_csrs_info)
620 {
621 	admin_csrs_info->mailbox_offset = ADF_C4XXX_MAILBOX_BASE_OFFSET;
622 	admin_csrs_info->admin_msg_ur = ADF_C4XXX_ADMINMSGUR_OFFSET;
623 	admin_csrs_info->admin_msg_lr = ADF_C4XXX_ADMINMSGLR_OFFSET;
624 }
625 
626 static void
get_errsou_offset(u32 * errsou3,u32 * errsou5)627 get_errsou_offset(u32 *errsou3, u32 *errsou5)
628 {
629 	*errsou3 = ADF_C4XXX_ERRSOU3;
630 	*errsou5 = ADF_C4XXX_ERRSOU5;
631 }
632 
633 static void
adf_enable_error_correction(struct adf_accel_dev * accel_dev)634 adf_enable_error_correction(struct adf_accel_dev *accel_dev)
635 {
636 	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
637 	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR];
638 	struct resource *csr = misc_bar->virt_addr;
639 	unsigned int val, i = 0;
640 	unsigned long ae_mask;
641 	unsigned long accel_mask;
642 
643 	ae_mask = hw_device->ae_mask;
644 
645 	/* Enable Accel Engine error detection & correction */
646 	for_each_set_bit(i, &ae_mask, ADF_C4XXX_MAX_ACCELENGINES)
647 	{
648 		val = ADF_CSR_RD(csr, ADF_C4XXX_AE_CTX_ENABLES(i));
649 		val |= ADF_C4XXX_ENABLE_AE_ECC_ERR;
650 		ADF_CSR_WR(csr, ADF_C4XXX_AE_CTX_ENABLES(i), val);
651 		val = ADF_CSR_RD(csr, ADF_C4XXX_AE_MISC_CONTROL(i));
652 		val |= ADF_C4XXX_ENABLE_AE_ECC_PARITY_CORR;
653 		ADF_CSR_WR(csr, ADF_C4XXX_AE_MISC_CONTROL(i), val);
654 	}
655 
656 	accel_mask = hw_device->accel_mask;
657 
658 	/* Enable shared memory error detection & correction */
659 	for_each_set_bit(i, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS)
660 	{
661 		val = ADF_CSR_RD(csr, ADF_C4XXX_UERRSSMSH(i));
662 		val |= ADF_C4XXX_ERRSSMSH_EN;
663 		ADF_CSR_WR(csr, ADF_C4XXX_UERRSSMSH(i), val);
664 		val = ADF_CSR_RD(csr, ADF_C4XXX_CERRSSMSH(i));
665 		val |= ADF_C4XXX_ERRSSMSH_EN;
666 		ADF_CSR_WR(csr, ADF_C4XXX_CERRSSMSH(i), val);
667 	}
668 
669 	adf_enable_ras(accel_dev);
670 	adf_enable_mmp_error_correction(csr, hw_device);
671 	adf_enable_slice_hang_detection(accel_dev);
672 	adf_enable_error_interrupts(accel_dev);
673 }
674 
675 static void
adf_enable_ints(struct adf_accel_dev * accel_dev)676 adf_enable_ints(struct adf_accel_dev *accel_dev)
677 {
678 	struct resource *addr;
679 
680 	addr = (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr;
681 
682 	/* Enable bundle interrupts */
683 	ADF_CSR_WR(addr, ADF_C4XXX_SMIAPF0_MASK_OFFSET, ADF_C4XXX_SMIA0_MASK);
684 	ADF_CSR_WR(addr, ADF_C4XXX_SMIAPF1_MASK_OFFSET, ADF_C4XXX_SMIA1_MASK);
685 	ADF_CSR_WR(addr, ADF_C4XXX_SMIAPF2_MASK_OFFSET, ADF_C4XXX_SMIA2_MASK);
686 	ADF_CSR_WR(addr, ADF_C4XXX_SMIAPF3_MASK_OFFSET, ADF_C4XXX_SMIA3_MASK);
687 	/*Enable misc interrupts*/
688 	ADF_CSR_WR(addr, ADF_C4XXX_SMIAPF4_MASK_OFFSET, ADF_C4XXX_SMIA4_MASK);
689 }
690 
691 static u32
get_ae_clock(struct adf_hw_device_data * self)692 get_ae_clock(struct adf_hw_device_data *self)
693 {
694 	/* Clock update interval is <16> ticks for c4xxx. */
695 	return self->clock_frequency / 16;
696 }
697 
698 static int
measure_clock(struct adf_accel_dev * accel_dev)699 measure_clock(struct adf_accel_dev *accel_dev)
700 {
701 	u32 frequency;
702 	int ret = 0;
703 
704 	ret = adf_dev_measure_clock(accel_dev,
705 				    &frequency,
706 				    ADF_C4XXX_MIN_AE_FREQ,
707 				    ADF_C4XXX_MAX_AE_FREQ);
708 	if (ret)
709 		return ret;
710 
711 	accel_dev->hw_device->clock_frequency = frequency;
712 	return 0;
713 }
714 
715 static int
get_storage_enabled(struct adf_accel_dev * accel_dev,uint32_t * storage_enabled)716 get_storage_enabled(struct adf_accel_dev *accel_dev, uint32_t *storage_enabled)
717 {
718 	if (accel_dev->au_info->num_dc_au > 0) {
719 		*storage_enabled = 1;
720 		GET_HW_DATA(accel_dev)->extended_dc_capabilities =
721 		    ICP_ACCEL_CAPABILITIES_ADVANCED_COMPRESSION;
722 	}
723 	return 0;
724 }
725 
726 static u32
c4xxx_get_hw_cap(struct adf_accel_dev * accel_dev)727 c4xxx_get_hw_cap(struct adf_accel_dev *accel_dev)
728 {
729 	device_t pdev = accel_dev->accel_pci_dev.pci_dev;
730 	u32 legfuses;
731 	u32 softstrappull0, softstrappull2;
732 	u32 fusectl0, fusectl2;
733 	u32 capabilities;
734 
735 	/* Read accelerator capabilities mask */
736 	legfuses = pci_read_config(pdev, ADF_DEVICE_LEGFUSE_OFFSET, 4);
737 	capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
738 	    ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
739 	    ICP_ACCEL_CAPABILITIES_CIPHER |
740 	    ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
741 	    ICP_ACCEL_CAPABILITIES_COMPRESSION | ICP_ACCEL_CAPABILITIES_ZUC |
742 	    ICP_ACCEL_CAPABILITIES_HKDF | ICP_ACCEL_CAPABILITIES_SHA3_EXT |
743 	    ICP_ACCEL_CAPABILITIES_SM3 | ICP_ACCEL_CAPABILITIES_SM4 |
744 	    ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
745 	    ICP_ACCEL_CAPABILITIES_AESGCM_SPC |
746 	    ICP_ACCEL_CAPABILITIES_ECEDMONT;
747 
748 	if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) {
749 		capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
750 		capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
751 	}
752 	if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE)
753 		capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
754 	if (legfuses & ICP_ACCEL_MASK_PKE_SLICE)
755 		capabilities &= ~(ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
756 				  ICP_ACCEL_CAPABILITIES_ECEDMONT);
757 	if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE) {
758 		capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
759 		capabilities &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY;
760 	}
761 	if (legfuses & ICP_ACCEL_MASK_EIA3_SLICE)
762 		capabilities &= ~ICP_ACCEL_CAPABILITIES_ZUC;
763 	if (legfuses & ICP_ACCEL_MASK_SM3_SLICE)
764 		capabilities &= ~ICP_ACCEL_CAPABILITIES_SM3;
765 	if (legfuses & ICP_ACCEL_MASK_SM4_SLICE)
766 		capabilities &= ~ICP_ACCEL_CAPABILITIES_SM4;
767 
768 	/* Read fusectl0 & softstrappull0 registers to ensure inline
769 	 * acceleration is not disabled
770 	 */
771 	softstrappull0 =
772 	    pci_read_config(pdev, ADF_C4XXX_SOFTSTRAPPULL0_OFFSET, 4);
773 	fusectl0 = pci_read_config(pdev, ADF_C4XXX_FUSECTL0_OFFSET, 4);
774 	if ((fusectl0 | softstrappull0) & ADF_C4XXX_FUSE_DISABLE_INLINE_MASK)
775 		capabilities &= ~ICP_ACCEL_CAPABILITIES_INLINE;
776 
777 	/* Read fusectl2 & softstrappull2 registers to check out if
778 	 * PKE/DC are enabled/disabled
779 	 */
780 	softstrappull2 =
781 	    pci_read_config(pdev, ADF_C4XXX_SOFTSTRAPPULL2_OFFSET, 4);
782 	fusectl2 = pci_read_config(pdev, ADF_C4XXX_FUSECTL2_OFFSET, 4);
783 	/* Disable PKE/DC cap if there are no PKE/DC-enabled AUs. */
784 	if (!(~fusectl2 & ~softstrappull2 & ADF_C4XXX_FUSE_PKE_MASK))
785 		capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
786 	if (!(~fusectl2 & ~softstrappull2 & ADF_C4XXX_FUSE_COMP_MASK))
787 		capabilities &= ~(ICP_ACCEL_CAPABILITIES_COMPRESSION |
788 				  ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY);
789 
790 	return capabilities;
791 }
792 
793 static int
c4xxx_configure_accel_units(struct adf_accel_dev * accel_dev)794 c4xxx_configure_accel_units(struct adf_accel_dev *accel_dev)
795 {
796 	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES] = { 0 };
797 	unsigned long val;
798 	char val_str[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = { 0 };
799 	int sku;
800 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
801 
802 	sku = get_sku(hw_data);
803 
804 	if (adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC))
805 		goto err;
806 
807 	snprintf(key, sizeof(key), ADF_SERVICES_ENABLED);
808 
809 	/* Base station SKU supports symmetric cryptography only. */
810 	if (adf_check_sym_only_sku_c4xxx(accel_dev))
811 		snprintf(val_str, sizeof(val_str), ADF_SERVICE_SYM);
812 	else
813 		snprintf(val_str, sizeof(val_str), ADF_SERVICE_CY);
814 
815 	val = sku_dc_au[sku];
816 	if (val) {
817 		strncat(val_str,
818 			ADF_SERVICES_SEPARATOR ADF_SERVICE_DC,
819 			ADF_CFG_MAX_VAL_LEN_IN_BYTES -
820 			    strnlen(val_str, sizeof(val_str)) -
821 			    ADF_CFG_NULL_TERM_SIZE);
822 	}
823 
824 	if (adf_cfg_add_key_value_param(
825 		accel_dev, ADF_GENERAL_SEC, key, (void *)val_str, ADF_STR))
826 		goto err;
827 
828 	snprintf(key, sizeof(key), ADF_NUM_CY_ACCEL_UNITS);
829 	val = sku_cy_au[sku];
830 	if (adf_cfg_add_key_value_param(
831 		accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC))
832 		goto err;
833 
834 	snprintf(key, sizeof(key), ADF_NUM_DC_ACCEL_UNITS);
835 	val = sku_dc_au[sku];
836 	if (adf_cfg_add_key_value_param(
837 		accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC))
838 		goto err;
839 
840 	snprintf(key, sizeof(key), ADF_NUM_INLINE_ACCEL_UNITS);
841 	val = sku_inline_au[sku];
842 	if (adf_cfg_add_key_value_param(
843 		accel_dev, ADF_GENERAL_SEC, key, (void *)&val, ADF_DEC))
844 		goto err;
845 
846 	return 0;
847 err:
848 	device_printf(GET_DEV(accel_dev), "Failed to configure accel units\n");
849 	return EINVAL;
850 }
851 
852 static void
update_hw_capability(struct adf_accel_dev * accel_dev)853 update_hw_capability(struct adf_accel_dev *accel_dev)
854 {
855 	struct adf_accel_unit_info *au_info = accel_dev->au_info;
856 	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
857 	u32 disabled_caps = 0;
858 
859 	if (!au_info->asym_ae_msk)
860 		disabled_caps = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
861 		    ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
862 
863 	if (!au_info->sym_ae_msk)
864 		disabled_caps |= ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
865 		    ICP_ACCEL_CAPABILITIES_CIPHER | ICP_ACCEL_CAPABILITIES_ZUC |
866 		    ICP_ACCEL_CAPABILITIES_SHA3_EXT |
867 		    ICP_ACCEL_CAPABILITIES_SM3 | ICP_ACCEL_CAPABILITIES_SM4 |
868 		    ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
869 		    ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
870 
871 	if (!au_info->dc_ae_msk) {
872 		disabled_caps |= ICP_ACCEL_CAPABILITIES_COMPRESSION |
873 		    ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY;
874 		hw_device->extended_dc_capabilities = 0;
875 	}
876 
877 	if (!au_info->inline_ingress_msk && !au_info->inline_egress_msk)
878 		disabled_caps |= ICP_ACCEL_CAPABILITIES_INLINE;
879 
880 	hw_device->accel_capabilities_mask =
881 	    c4xxx_get_hw_cap(accel_dev) & ~disabled_caps;
882 }
883 
884 static void
c4xxx_set_sadb_size(struct adf_accel_dev * accel_dev)885 c4xxx_set_sadb_size(struct adf_accel_dev *accel_dev)
886 {
887 	u32 sadb_reg_value = 0;
888 	struct resource *aram_csr_base;
889 
890 	aram_csr_base = (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr;
891 	if (accel_dev->au_info->num_inline_au) {
892 		/* REG_SA_DB_CTRL register initialisation */
893 		sadb_reg_value = ADF_C4XXX_SADB_REG_VALUE(accel_dev);
894 		ADF_CSR_WR(aram_csr_base,
895 			   ADF_C4XXX_REG_SA_DB_CTRL,
896 			   sadb_reg_value);
897 	} else {
898 		/* Zero the SADB size when inline is disabled. */
899 		adf_csr_fetch_and_and(aram_csr_base,
900 				      ADF_C4XXX_REG_SA_DB_CTRL,
901 				      ADF_C4XXX_SADB_SIZE_BIT);
902 	}
903 	/* REG_SA_CTRL_LOCK register initialisation. We set the lock
904 	 * bit in order to prevent the REG_SA_DB_CTRL to be
905 	 * overwritten
906 	 */
907 	ADF_CSR_WR(aram_csr_base,
908 		   ADF_C4XXX_REG_SA_CTRL_LOCK,
909 		   ADF_C4XXX_DEFAULT_SA_CTRL_LOCKOUT);
910 }
911 
912 static void
c4xxx_init_error_notification_configuration(struct adf_accel_dev * accel_dev,u32 offset)913 c4xxx_init_error_notification_configuration(struct adf_accel_dev *accel_dev,
914 					    u32 offset)
915 {
916 	struct resource *aram_csr_base;
917 
918 	aram_csr_base = (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr;
919 
920 	/* configure error notification configuration registers */
921 	/* Set CD Parity error */
922 	ADF_CSR_WR(aram_csr_base,
923 		   ADF_C4XXX_IC_CD_RF_PARITY_ERR_0 + offset,
924 		   ADF_C4XXX_CD_RF_PARITY_ERR_0_VAL);
925 	ADF_CSR_WR(aram_csr_base,
926 		   ADF_C4XXX_IC_CD_RF_PARITY_ERR_1 + offset,
927 		   ADF_C4XXX_CD_RF_PARITY_ERR_1_VAL);
928 	ADF_CSR_WR(aram_csr_base,
929 		   ADF_C4XXX_IC_CD_RF_PARITY_ERR_2 + offset,
930 		   ADF_C4XXX_CD_RF_PARITY_ERR_2_VAL);
931 	ADF_CSR_WR(aram_csr_base,
932 		   ADF_C4XXX_IC_CD_RF_PARITY_ERR_3 + offset,
933 		   ADF_C4XXX_CD_RF_PARITY_ERR_3_VAL);
934 	/* Set CD RAM ECC Correctable Error */
935 	ADF_CSR_WR(aram_csr_base,
936 		   ADF_C4XXX_IC_CD_CERR + offset,
937 		   ADF_C4XXX_CD_CERR_VAL);
938 	/* Set CD RAM ECC UnCorrectable Error */
939 	ADF_CSR_WR(aram_csr_base,
940 		   ADF_C4XXX_IC_CD_UERR + offset,
941 		   ADF_C4XXX_CD_UERR_VAL);
942 	/* Set Inline (excl cmd_dis) Parity Error */
943 	ADF_CSR_WR(aram_csr_base,
944 		   ADF_C4XXX_IC_INLN_RF_PARITY_ERR_0 + offset,
945 		   ADF_C4XXX_INLN_RF_PARITY_ERR_0_VAL);
946 	ADF_CSR_WR(aram_csr_base,
947 		   ADF_C4XXX_IC_INLN_RF_PARITY_ERR_1 + offset,
948 		   ADF_C4XXX_INLN_RF_PARITY_ERR_1_VAL);
949 	ADF_CSR_WR(aram_csr_base,
950 		   ADF_C4XXX_IC_INLN_RF_PARITY_ERR_2 + offset,
951 		   ADF_C4XXX_INLN_RF_PARITY_ERR_2_VAL);
952 	ADF_CSR_WR(aram_csr_base,
953 		   ADF_C4XXX_IC_INLN_RF_PARITY_ERR_3 + offset,
954 		   ADF_C4XXX_INLN_RF_PARITY_ERR_3_VAL);
955 	ADF_CSR_WR(aram_csr_base,
956 		   ADF_C4XXX_IC_INLN_RF_PARITY_ERR_4 + offset,
957 		   ADF_C4XXX_INLN_RF_PARITY_ERR_4_VAL);
958 	ADF_CSR_WR(aram_csr_base,
959 		   ADF_C4XXX_IC_INLN_RF_PARITY_ERR_5 + offset,
960 		   ADF_C4XXX_INLN_RF_PARITY_ERR_5_VAL);
961 	/* Set Parser RAM ECC Correctable Error */
962 	ADF_CSR_WR(aram_csr_base,
963 		   ADF_C4XXX_IC_PARSER_CERR + offset,
964 		   ADF_C4XXX_PARSER_CERR_VAL);
965 	/* Set Parser RAM ECC UnCorrectable Error */
966 	ADF_CSR_WR(aram_csr_base,
967 		   ADF_C4XXX_IC_PARSER_UERR + offset,
968 		   ADF_C4XXX_PARSER_UERR_VAL);
969 	/* Set CTPB RAM ECC Correctable Error */
970 	ADF_CSR_WR(aram_csr_base,
971 		   ADF_C4XXX_IC_CTPB_CERR + offset,
972 		   ADF_C4XXX_CTPB_CERR_VAL);
973 	/* Set CTPB RAM ECC UnCorrectable Error */
974 	ADF_CSR_WR(aram_csr_base,
975 		   ADF_C4XXX_IC_CTPB_UERR + offset,
976 		   ADF_C4XXX_CTPB_UERR_VAL);
977 	/* Set CPP Interface Status */
978 	ADF_CSR_WR(aram_csr_base,
979 		   ADF_C4XXX_IC_CPPM_ERR_STAT + offset,
980 		   ADF_C4XXX_CPPM_ERR_STAT_VAL);
981 	/* Set CGST_MGMT_INT */
982 	ADF_CSR_WR(aram_csr_base,
983 		   ADF_C4XXX_IC_CONGESTION_MGMT_INT + offset,
984 		   ADF_C4XXX_CONGESTION_MGMT_INI_VAL);
985 	/* CPP Interface Status */
986 	ADF_CSR_WR(aram_csr_base,
987 		   ADF_C4XXX_IC_CPPT_ERR_STAT + offset,
988 		   ADF_C4XXX_CPPT_ERR_STAT_VAL);
989 	/* MAC Interrupt Mask */
990 	ADF_CSR_WR64(aram_csr_base,
991 		     ADF_C4XXX_IC_MAC_IM + offset,
992 		     ADF_C4XXX_MAC_IM_VAL);
993 }
994 
995 static void
c4xxx_enable_parse_extraction(struct adf_accel_dev * accel_dev)996 c4xxx_enable_parse_extraction(struct adf_accel_dev *accel_dev)
997 {
998 	struct resource *aram_csr_base;
999 
1000 	aram_csr_base = (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr;
1001 
1002 	/* Enable Inline Parse Extraction CRSs */
1003 
1004 	/* Set IC_PARSE_CTRL register */
1005 	ADF_CSR_WR(aram_csr_base,
1006 		   ADF_C4XXX_IC_PARSE_CTRL_OFFSET,
1007 		   ADF_C4XXX_IC_PARSE_CTRL_OFFSET_DEFAULT_VALUE);
1008 
1009 	/* Set IC_PARSE_FIXED_DATA(0) */
1010 	ADF_CSR_WR(aram_csr_base,
1011 		   ADF_C4XXX_IC_PARSE_FIXED_DATA(0),
1012 		   ADF_C4XXX_DEFAULT_IC_PARSE_FIXED_DATA_0);
1013 
1014 	/* Set IC_PARSE_FIXED_LENGTH */
1015 	ADF_CSR_WR(aram_csr_base,
1016 		   ADF_C4XXX_IC_PARSE_FIXED_LENGTH,
1017 		   ADF_C4XXX_DEFAULT_IC_PARSE_FIXED_LEN);
1018 
1019 	/* Configure ESP protocol from an IPv4 header */
1020 	ADF_CSR_WR(aram_csr_base,
1021 		   ADF_C4XXX_IC_PARSE_IPV4_OFFSET_0,
1022 		   ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_OFFS_0_VALUE);
1023 	ADF_CSR_WR(aram_csr_base,
1024 		   ADF_C4XXX_IC_PARSE_IPV4_LENGTH_0,
1025 		   ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_LEN_0_VALUE);
1026 	/* Configure protocol extraction field from an IPv4 header */
1027 	ADF_CSR_WR(aram_csr_base,
1028 		   ADF_C4XXX_IC_PARSE_IPV4_OFFSET_1,
1029 		   ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_OFFS_1_VALUE);
1030 	ADF_CSR_WR(aram_csr_base,
1031 		   ADF_C4XXX_IC_PARSE_IPV4_LENGTH_1,
1032 		   ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_LEN_1_VALUE);
1033 	/* Configure SPI extraction field from an IPv4 header */
1034 	ADF_CSR_WR(aram_csr_base,
1035 		   ADF_C4XXX_IC_PARSE_IPV4_OFFSET_2,
1036 		   ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_OFFS_2_VALUE);
1037 	ADF_CSR_WR(aram_csr_base,
1038 		   ADF_C4XXX_IC_PARSE_IPV4_LENGTH_2,
1039 		   ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_LEN_2_VALUE);
1040 	/* Configure destination field IP address from an IPv4 header */
1041 	ADF_CSR_WR(aram_csr_base,
1042 		   ADF_C4XXX_IC_PARSE_IPV4_OFFSET_3,
1043 		   ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_OFFS_3_VALUE);
1044 	ADF_CSR_WR(aram_csr_base,
1045 		   ADF_C4XXX_IC_PARSE_IPV4_LENGTH_3,
1046 		   ADF_C4XXX_DEFAULT_IC_PARSE_IPV4_LEN_3_VALUE);
1047 
1048 	/* Configure function number extraction field from an IPv6 header */
1049 	ADF_CSR_WR(aram_csr_base,
1050 		   ADF_C4XXX_IC_PARSE_IPV6_OFFSET_0,
1051 		   ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_OFFS_0_VALUE);
1052 	ADF_CSR_WR(aram_csr_base,
1053 		   ADF_C4XXX_IC_PARSE_IPV6_LENGTH_0,
1054 		   ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_LEN_0_VALUE);
1055 	/* Configure protocol extraction field from an IPv6 header */
1056 	ADF_CSR_WR(aram_csr_base,
1057 		   ADF_C4XXX_IC_PARSE_IPV6_OFFSET_1,
1058 		   ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_OFFS_1_VALUE);
1059 	ADF_CSR_WR(aram_csr_base,
1060 		   ADF_C4XXX_IC_PARSE_IPV6_LENGTH_1,
1061 		   ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_LEN_1_VALUE);
1062 	/* Configure SPI extraction field from an IPv6 header */
1063 	ADF_CSR_WR(aram_csr_base,
1064 		   ADF_C4XXX_IC_PARSE_IPV6_OFFSET_2,
1065 		   ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_OFFS_2_VALUE);
1066 	ADF_CSR_WR(aram_csr_base,
1067 		   ADF_C4XXX_IC_PARSE_IPV6_LENGTH_2,
1068 		   ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_LEN_2_VALUE);
1069 	/* Configure destination field IP address from an IPv6 header */
1070 	ADF_CSR_WR(aram_csr_base,
1071 		   ADF_C4XXX_IC_PARSE_IPV6_OFFSET_3,
1072 		   ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_OFFS_3_VALUE);
1073 	ADF_CSR_WR(aram_csr_base,
1074 		   ADF_C4XXX_IC_PARSE_IPV6_LENGTH_3,
1075 		   ADF_C4XXX_DEFAULT_IC_PARSE_IPV6_LEN_3_VALUE);
1076 }
1077 
1078 static int
adf_get_inline_ipsec_algo_group(struct adf_accel_dev * accel_dev,unsigned long * ipsec_algo_group)1079 adf_get_inline_ipsec_algo_group(struct adf_accel_dev *accel_dev,
1080 				unsigned long *ipsec_algo_group)
1081 {
1082 	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
1083 
1084 	if (adf_cfg_get_param_value(
1085 		accel_dev, ADF_INLINE_SEC, ADF_INLINE_IPSEC_ALGO_GROUP, val))
1086 		return EFAULT;
1087 	if (kstrtoul(val, 0, ipsec_algo_group))
1088 		return EFAULT;
1089 
1090 	/* Verify the ipsec_algo_group */
1091 	if (*ipsec_algo_group >= IPSEC_ALGO_GROUP_DELIMITER) {
1092 		device_printf(
1093 		    GET_DEV(accel_dev),
1094 		    "Unsupported IPSEC algo group %lu in config file!\n",
1095 		    *ipsec_algo_group);
1096 		return EFAULT;
1097 	}
1098 
1099 	return 0;
1100 }
1101 
1102 static int
c4xxx_init_inline_hw(struct adf_accel_dev * accel_dev)1103 c4xxx_init_inline_hw(struct adf_accel_dev *accel_dev)
1104 {
1105 	u32 sa_entry_reg_value = 0;
1106 	u32 sa_fn_lim = 0;
1107 	u32 supported_algo = 0;
1108 	struct resource *aram_csr_base;
1109 	u32 offset;
1110 	unsigned long ipsec_algo_group = IPSEC_DEFAUL_ALGO_GROUP;
1111 
1112 	aram_csr_base = (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr;
1113 
1114 	if (adf_get_inline_ipsec_algo_group(accel_dev, &ipsec_algo_group))
1115 		return EFAULT;
1116 
1117 	sa_entry_reg_value |=
1118 	    (ADF_C4XXX_DEFAULT_LU_KEY_LEN << ADF_C4XXX_LU_KEY_LEN_BIT_OFFSET);
1119 	if (ipsec_algo_group == IPSEC_DEFAUL_ALGO_GROUP) {
1120 		sa_entry_reg_value |= ADF_C4XXX_DEFAULT_SA_SIZE;
1121 		sa_fn_lim =
1122 		    ADF_C4XXX_FUNC_LIMIT(accel_dev, ADF_C4XXX_DEFAULT_SA_SIZE);
1123 		supported_algo = ADF_C4XXX_DEFAULT_SUPPORTED_ALGORITHMS;
1124 	} else if (ipsec_algo_group == IPSEC_ALGO_GROUP1) {
1125 		sa_entry_reg_value |= ADF_C4XXX_ALGO_GROUP1_SA_SIZE;
1126 		sa_fn_lim = ADF_C4XXX_FUNC_LIMIT(accel_dev,
1127 						 ADF_C4XXX_ALGO_GROUP1_SA_SIZE);
1128 		supported_algo = ADF_C4XXX_SUPPORTED_ALGORITHMS_GROUP1;
1129 	} else {
1130 		return EFAULT;
1131 	}
1132 
1133 	/* REG_SA_ENTRY_CTRL register initialisation */
1134 	ADF_CSR_WR(aram_csr_base,
1135 		   ADF_C4XXX_REG_SA_ENTRY_CTRL,
1136 		   sa_entry_reg_value);
1137 
1138 	/* REG_SAL_FUNC_LIMITS register initialisation. Only the first register
1139 	 * needs to be initialised to enable as it is assigned to a physical
1140 	 * function. Other registers will be initialised by the LAN PF driver.
1141 	 * The function limits is initialised to its maximal value.
1142 	 */
1143 	ADF_CSR_WR(aram_csr_base, ADF_C4XXX_REG_SA_FUNC_LIMITS, sa_fn_lim);
1144 
1145 	/* Initialize REG_SA_SCRATCH[0] register to
1146 	 * advertise supported crypto algorithms
1147 	 */
1148 	ADF_CSR_WR(aram_csr_base, ADF_C4XXX_REG_SA_SCRATCH_0, supported_algo);
1149 
1150 	/* REG_SA_SCRATCH[2] register initialisation
1151 	 * to advertise supported crypto offload features.
1152 	 */
1153 	ADF_CSR_WR(aram_csr_base,
1154 		   ADF_C4XXX_REG_SA_SCRATCH_2,
1155 		   ADF_C4XXX_DEFAULT_CY_OFFLOAD_FEATURES);
1156 
1157 	/* Overwrite default MAC_CFG register in ingress offset */
1158 	ADF_CSR_WR64(aram_csr_base,
1159 		     ADF_C4XXX_MAC_CFG + ADF_C4XXX_INLINE_INGRESS_OFFSET,
1160 		     ADF_C4XXX_MAC_CFG_VALUE);
1161 
1162 	/* Overwrite default MAC_CFG register in egress offset */
1163 	ADF_CSR_WR64(aram_csr_base,
1164 		     ADF_C4XXX_MAC_CFG + ADF_C4XXX_INLINE_EGRESS_OFFSET,
1165 		     ADF_C4XXX_MAC_CFG_VALUE);
1166 
1167 	/* Overwrite default MAC_PIA_CFG
1168 	 * (Packet Interface Adapter Configuration) registers
1169 	 * in ingress offset
1170 	 */
1171 	ADF_CSR_WR64(aram_csr_base,
1172 		     ADF_C4XXX_MAC_PIA_CFG + ADF_C4XXX_INLINE_INGRESS_OFFSET,
1173 		     ADF_C4XXX_MAC_PIA_CFG_VALUE);
1174 
1175 	/* Overwrite default MAC_PIA_CFG in egress offset */
1176 	ADF_CSR_WR64(aram_csr_base,
1177 		     ADF_C4XXX_MAC_PIA_CFG + ADF_C4XXX_INLINE_EGRESS_OFFSET,
1178 		     ADF_C4XXX_MAC_PIA_CFG_VALUE);
1179 
1180 	c4xxx_enable_parse_extraction(accel_dev);
1181 
1182 	ADF_CSR_WR(aram_csr_base,
1183 		   ADF_C4XXX_INGRESS_CMD_DIS_MISC,
1184 		   ADF_C4XXX_REG_CMD_DIS_MISC_DEFAULT_VALUE);
1185 
1186 	ADF_CSR_WR(aram_csr_base,
1187 		   ADF_C4XXX_EGRESS_CMD_DIS_MISC,
1188 		   ADF_C4XXX_REG_CMD_DIS_MISC_DEFAULT_VALUE);
1189 
1190 	/* Set bits<1:0> in ADF_C4XXX_INLINE_CAPABILITY register to
1191 	 * advertize that both ingress and egress directions are available
1192 	 */
1193 	ADF_CSR_WR(aram_csr_base,
1194 		   ADF_C4XXX_REG_SA_INLINE_CAPABILITY,
1195 		   ADF_C4XXX_INLINE_CAPABILITIES);
1196 
1197 	/* Set error notification configuration of ingress */
1198 	offset = ADF_C4XXX_INLINE_INGRESS_OFFSET;
1199 	c4xxx_init_error_notification_configuration(accel_dev, offset);
1200 	/* Set error notification configuration of egress */
1201 	offset = ADF_C4XXX_INLINE_EGRESS_OFFSET;
1202 	c4xxx_init_error_notification_configuration(accel_dev, offset);
1203 
1204 	return 0;
1205 }
1206 
1207 static void
adf_enable_inline_notification(struct adf_accel_dev * accel_dev)1208 adf_enable_inline_notification(struct adf_accel_dev *accel_dev)
1209 {
1210 	struct resource *aram_csr_base;
1211 
1212 	aram_csr_base = (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr;
1213 
1214 	/* Set bit<0> in ADF_C4XXX_REG_SA_INLINE_ENABLE to advertise
1215 	 * that inline is enabled.
1216 	 */
1217 	ADF_CSR_WR(aram_csr_base,
1218 		   ADF_C4XXX_REG_SA_INLINE_ENABLE,
1219 		   ADF_C4XXX_INLINE_ENABLED);
1220 }
1221 
1222 static int
c4xxx_init_aram_config(struct adf_accel_dev * accel_dev)1223 c4xxx_init_aram_config(struct adf_accel_dev *accel_dev)
1224 {
1225 	u32 aram_size = ADF_C4XXX_2MB_ARAM_SIZE;
1226 	u32 ibuff_mem_needed = 0;
1227 	u32 usable_aram_size = 0;
1228 	struct adf_hw_aram_info *aram_info;
1229 	u32 sa_db_ctl_value;
1230 	struct resource *aram_csr_base;
1231 	u8 profile = 0;
1232 	u32 sadb_size = 0;
1233 	u32 sa_size = 0;
1234 	unsigned long ipsec_algo_group = IPSEC_DEFAUL_ALGO_GROUP;
1235 	u32 i;
1236 
1237 	if (accel_dev->au_info->num_inline_au > 0)
1238 		if (adf_get_inline_ipsec_algo_group(accel_dev,
1239 						    &ipsec_algo_group))
1240 			return EFAULT;
1241 
1242 	/* Allocate memory for adf_hw_aram_info */
1243 	aram_info = kzalloc(sizeof(*accel_dev->aram_info), GFP_KERNEL);
1244 	if (!aram_info)
1245 		return ENOMEM;
1246 
1247 	/* Initialise Inline direction */
1248 	aram_info->inline_direction_egress_mask = 0;
1249 	if (accel_dev->au_info->num_inline_au) {
1250 		/* Set inline direction bitmap in the ARAM to
1251 		 * inform firmware which ME is egress
1252 		 */
1253 		aram_info->inline_direction_egress_mask =
1254 		    accel_dev->au_info->inline_egress_msk;
1255 
1256 		/* User profile is valid, we can now add it
1257 		 * in the ARAM partition table
1258 		 */
1259 		aram_info->inline_congest_mngt_profile = profile;
1260 	}
1261 	/* Initialise DC ME mask, "1" = ME is used for DC operations */
1262 	aram_info->dc_ae_mask = accel_dev->au_info->dc_ae_msk;
1263 
1264 	/* Initialise CY ME mask, "1" = ME is used for CY operations
1265 	 * Since asym service can also be enabled on inline AEs, here
1266 	 * we use the sym ae mask for configuring the cy_ae_msk
1267 	 */
1268 	aram_info->cy_ae_mask = accel_dev->au_info->sym_ae_msk;
1269 
1270 	/* Configure number of long words in the ARAM */
1271 	aram_info->num_aram_lw_entries = ADF_C4XXX_NUM_ARAM_ENTRIES;
1272 
1273 	/* Reset region offset values to 0xffffffff */
1274 	aram_info->mmp_region_offset = ~aram_info->mmp_region_offset;
1275 	aram_info->skm_region_offset = ~aram_info->skm_region_offset;
1276 	aram_info->inter_buff_aram_region_offset =
1277 	    ~aram_info->inter_buff_aram_region_offset;
1278 
1279 	/* Determine ARAM size */
1280 	aram_csr_base = (&GET_BARS(accel_dev)[ADF_C4XXX_SRAM_BAR])->virt_addr;
1281 	sa_db_ctl_value = ADF_CSR_RD(aram_csr_base, ADF_C4XXX_REG_SA_DB_CTRL);
1282 
1283 	aram_size = (sa_db_ctl_value & ADF_C4XXX_SADB_SIZE_BIT) ?
1284 	    ADF_C4XXX_2MB_ARAM_SIZE :
1285 	    ADF_C4XXX_4MB_ARAM_SIZE;
1286 	device_printf(GET_DEV(accel_dev),
1287 		      "Total available accelerator memory: %uMB\n",
1288 		      aram_size / ADF_C4XXX_1MB_SIZE);
1289 
1290 	/* Compute MMP region offset */
1291 	aram_info->mmp_region_size = ADF_C4XXX_DEFAULT_MMP_REGION_SIZE;
1292 	aram_info->mmp_region_offset = aram_size - aram_info->mmp_region_size;
1293 
1294 	if (accel_dev->au_info->num_cy_au ||
1295 	    accel_dev->au_info->num_inline_au) {
1296 		/* Crypto is available therefore we must
1297 		 * include space in the ARAM for SKM.
1298 		 */
1299 		aram_info->skm_region_size = ADF_C4XXX_DEFAULT_SKM_REGION_SIZE;
1300 		/* Compute SKM region offset */
1301 		aram_info->skm_region_offset = aram_size -
1302 		    (aram_info->mmp_region_size + aram_info->skm_region_size);
1303 	}
1304 
1305 	/* SADB always start at offset 0. */
1306 	if (accel_dev->au_info->num_inline_au) {
1307 		/* Inline is available therefore we must
1308 		 * use remaining ARAM for the SADB.
1309 		 */
1310 		sadb_size = aram_size -
1311 		    (aram_info->mmp_region_size + aram_info->skm_region_size);
1312 
1313 		/*
1314 		 * When the inline service is enabled, the policy is that
1315 		 * compression gives up it's space in ARAM to allow for a
1316 		 * larger SADB. Compression must use DRAM instead of ARAM.
1317 		 */
1318 		aram_info->inter_buff_aram_region_size = 0;
1319 
1320 		/* the SADB size must be an integral multiple of the SA size */
1321 		if (ipsec_algo_group == IPSEC_DEFAUL_ALGO_GROUP) {
1322 			sa_size = ADF_C4XXX_DEFAULT_SA_SIZE;
1323 		} else {
1324 			/* IPSEC_ALGO_GROUP1
1325 			 * Total 2 algo groups.
1326 			 */
1327 			sa_size = ADF_C4XXX_ALGO_GROUP1_SA_SIZE;
1328 		}
1329 
1330 		sadb_size = sadb_size -
1331 		    (sadb_size % ADF_C4XXX_SA_SIZE_IN_BYTES(sa_size));
1332 		aram_info->sadb_region_size = sadb_size;
1333 	}
1334 
1335 	if (accel_dev->au_info->num_dc_au &&
1336 	    !accel_dev->au_info->num_inline_au) {
1337 		/* Compression is available therefore we must see if there is
1338 		 * space in the ARAM for intermediate buffers.
1339 		 */
1340 		aram_info->inter_buff_aram_region_size = 0;
1341 		usable_aram_size = aram_size -
1342 		    (aram_info->mmp_region_size + aram_info->skm_region_size);
1343 
1344 		for (i = 1; i <= accel_dev->au_info->num_dc_au; i++) {
1345 			if ((i * ADF_C4XXX_AU_COMPR_INTERM_SIZE) >
1346 			    usable_aram_size)
1347 				break;
1348 
1349 			ibuff_mem_needed = i * ADF_C4XXX_AU_COMPR_INTERM_SIZE;
1350 		}
1351 
1352 		/* Set remaining ARAM to intermediate buffers. Firmware handles
1353 		 * fallback to DRAM for cases were number of AU assigned
1354 		 * to compression exceeds available ARAM memory.
1355 		 */
1356 		aram_info->inter_buff_aram_region_size = ibuff_mem_needed;
1357 
1358 		/* If ARAM is used for compression set its initial offset. */
1359 		if (aram_info->inter_buff_aram_region_size)
1360 			aram_info->inter_buff_aram_region_offset = 0;
1361 	}
1362 
1363 	accel_dev->aram_info = aram_info;
1364 
1365 	return 0;
1366 }
1367 
1368 static void
c4xxx_exit_aram_config(struct adf_accel_dev * accel_dev)1369 c4xxx_exit_aram_config(struct adf_accel_dev *accel_dev)
1370 {
1371 	kfree(accel_dev->aram_info);
1372 	accel_dev->aram_info = NULL;
1373 }
1374 
1375 static u32
get_num_accel_units(struct adf_hw_device_data * self)1376 get_num_accel_units(struct adf_hw_device_data *self)
1377 {
1378 	u32 i = 0, num_accel = 0;
1379 	unsigned long accel_mask = 0;
1380 
1381 	if (!self || !self->accel_mask)
1382 		return 0;
1383 
1384 	accel_mask = self->accel_mask;
1385 
1386 	for_each_set_bit(i, &accel_mask, ADF_C4XXX_MAX_ACCELERATORS)
1387 	{
1388 		num_accel++;
1389 	}
1390 
1391 	return num_accel / ADF_C4XXX_NUM_ACCEL_PER_AU;
1392 }
1393 
1394 static int
get_accel_unit(struct adf_hw_device_data * self,struct adf_accel_unit ** accel_unit)1395 get_accel_unit(struct adf_hw_device_data *self,
1396 	       struct adf_accel_unit **accel_unit)
1397 {
1398 	enum dev_sku_info sku;
1399 
1400 	sku = get_sku(self);
1401 
1402 	switch (sku) {
1403 	case DEV_SKU_1:
1404 	case DEV_SKU_1_CY:
1405 		*accel_unit = adf_c4xxx_au_32_ae;
1406 		break;
1407 	case DEV_SKU_2:
1408 	case DEV_SKU_2_CY:
1409 		*accel_unit = adf_c4xxx_au_24_ae;
1410 		break;
1411 	case DEV_SKU_3:
1412 	case DEV_SKU_3_CY:
1413 		*accel_unit = adf_c4xxx_au_12_ae;
1414 		break;
1415 	default:
1416 		*accel_unit = adf_c4xxx_au_emulation;
1417 		break;
1418 	}
1419 	return 0;
1420 }
1421 
1422 static int
get_ae_info(struct adf_hw_device_data * self,const struct adf_ae_info ** ae_info)1423 get_ae_info(struct adf_hw_device_data *self, const struct adf_ae_info **ae_info)
1424 {
1425 	enum dev_sku_info sku;
1426 
1427 	sku = get_sku(self);
1428 
1429 	switch (sku) {
1430 	case DEV_SKU_1:
1431 		*ae_info = adf_c4xxx_32_ae;
1432 		break;
1433 	case DEV_SKU_1_CY:
1434 		*ae_info = adf_c4xxx_32_ae_sym;
1435 		break;
1436 	case DEV_SKU_2:
1437 		*ae_info = adf_c4xxx_24_ae;
1438 		break;
1439 	case DEV_SKU_2_CY:
1440 		*ae_info = adf_c4xxx_24_ae_sym;
1441 		break;
1442 	case DEV_SKU_3:
1443 		*ae_info = adf_c4xxx_12_ae;
1444 		break;
1445 	case DEV_SKU_3_CY:
1446 		*ae_info = adf_c4xxx_12_ae_sym;
1447 		break;
1448 	default:
1449 		*ae_info = adf_c4xxx_12_ae;
1450 		break;
1451 	}
1452 	return 0;
1453 }
1454 
1455 static int
adf_add_debugfs_info(struct adf_accel_dev * accel_dev)1456 adf_add_debugfs_info(struct adf_accel_dev *accel_dev)
1457 {
1458 	/* Add Accel Unit configuration table to debug FS interface */
1459 	if (c4xxx_init_ae_config(accel_dev)) {
1460 		device_printf(GET_DEV(accel_dev),
1461 			      "Failed to create entry for AE configuration\n");
1462 		return EFAULT;
1463 	}
1464 
1465 	return 0;
1466 }
1467 
1468 static void
adf_remove_debugfs_info(struct adf_accel_dev * accel_dev)1469 adf_remove_debugfs_info(struct adf_accel_dev *accel_dev)
1470 {
1471 	/* Remove Accel Unit configuration table from debug FS interface */
1472 	c4xxx_exit_ae_config(accel_dev);
1473 }
1474 
1475 static int
check_svc_to_hw_capabilities(struct adf_accel_dev * accel_dev,const char * svc_name,enum icp_qat_capabilities_mask cap)1476 check_svc_to_hw_capabilities(struct adf_accel_dev *accel_dev,
1477 			     const char *svc_name,
1478 			     enum icp_qat_capabilities_mask cap)
1479 {
1480 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
1481 	u32 hw_cap = hw_data->accel_capabilities_mask;
1482 
1483 	hw_cap &= cap;
1484 	if (hw_cap != cap) {
1485 		device_printf(GET_DEV(accel_dev),
1486 			      "Service not supported by accelerator: %s\n",
1487 			      svc_name);
1488 		return EPERM;
1489 	}
1490 
1491 	return 0;
1492 }
1493 
1494 static int
check_accel_unit_config(struct adf_accel_dev * accel_dev,u8 num_cy_au,u8 num_dc_au,u8 num_inline_au)1495 check_accel_unit_config(struct adf_accel_dev *accel_dev,
1496 			u8 num_cy_au,
1497 			u8 num_dc_au,
1498 			u8 num_inline_au)
1499 {
1500 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
1501 	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
1502 	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
1503 	u32 num_au = hw_data->get_num_accel_units(hw_data);
1504 	u32 service_mask = ADF_ACCEL_SERVICE_NULL;
1505 	char *token, *cur_str;
1506 	int ret = 0;
1507 
1508 	/* Get the services enabled by user */
1509 	snprintf(key, sizeof(key), ADF_SERVICES_ENABLED);
1510 	if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val))
1511 		return EFAULT;
1512 	cur_str = val;
1513 	token = strsep(&cur_str, ADF_SERVICES_SEPARATOR);
1514 	while (token) {
1515 		if (!strncmp(token, ADF_SERVICE_CY, strlen(ADF_SERVICE_CY))) {
1516 			service_mask |= ADF_ACCEL_CRYPTO;
1517 			ret |= check_svc_to_hw_capabilities(
1518 			    accel_dev,
1519 			    token,
1520 			    ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
1521 				ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC);
1522 		}
1523 
1524 		if (!strncmp(token, ADF_CFG_SYM, strlen(ADF_CFG_SYM))) {
1525 			service_mask |= ADF_ACCEL_CRYPTO;
1526 			ret |= check_svc_to_hw_capabilities(
1527 			    accel_dev,
1528 			    token,
1529 			    ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC);
1530 		}
1531 
1532 		if (!strncmp(token, ADF_CFG_ASYM, strlen(ADF_CFG_ASYM))) {
1533 			/* Handle the special case where services
1534 			 * 'asym;inline' are enabled and ASYM is handled by
1535 			 * the Inline firmware at AE level. This configuration
1536 			 * allows the ASYM service to be enabled without accel
1537 			 * units assigned to the CRYPTO service, e.g.
1538 			 * num_inline_au = 6
1539 			 * num_cy_au = 0
1540 			 */
1541 			if (num_inline_au < num_au)
1542 				service_mask |= ADF_ACCEL_CRYPTO;
1543 
1544 			ret |= check_svc_to_hw_capabilities(
1545 			    accel_dev,
1546 			    token,
1547 			    ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC);
1548 		}
1549 
1550 		if (!strncmp(token, ADF_SERVICE_DC, strlen(ADF_SERVICE_DC))) {
1551 			service_mask |= ADF_ACCEL_COMPRESSION;
1552 			ret |= check_svc_to_hw_capabilities(
1553 			    accel_dev,
1554 			    token,
1555 			    ICP_ACCEL_CAPABILITIES_COMPRESSION);
1556 		}
1557 
1558 		if (!strncmp(token,
1559 			     ADF_SERVICE_INLINE,
1560 			     strlen(ADF_SERVICE_INLINE))) {
1561 			service_mask |= ADF_ACCEL_INLINE_CRYPTO;
1562 			ret |= check_svc_to_hw_capabilities(
1563 			    accel_dev, token, ICP_ACCEL_CAPABILITIES_INLINE);
1564 		}
1565 
1566 		token = strsep(&cur_str, ADF_SERVICES_SEPARATOR);
1567 	}
1568 
1569 	/* Ensure the user doesn't enable services that are not supported by
1570 	 * the accelerator.
1571 	 */
1572 	if (ret) {
1573 		device_printf(GET_DEV(accel_dev),
1574 			      "Invalid accelerator configuration.\n");
1575 		return EFAULT;
1576 	}
1577 
1578 	if (!(service_mask & ADF_ACCEL_COMPRESSION) && num_dc_au > 0) {
1579 		device_printf(GET_DEV(accel_dev),
1580 			      "Invalid accel unit config.\n");
1581 		device_printf(
1582 		    GET_DEV(accel_dev),
1583 		    "DC accel units set when dc service not enabled\n");
1584 		return EFAULT;
1585 	}
1586 
1587 	if (!(service_mask & ADF_ACCEL_CRYPTO) && num_cy_au > 0) {
1588 		device_printf(GET_DEV(accel_dev),
1589 			      "Invalid accel unit config.\n");
1590 		device_printf(
1591 		    GET_DEV(accel_dev),
1592 		    "CY accel units set when cy service not enabled\n");
1593 		return EFAULT;
1594 	}
1595 
1596 	if (!(service_mask & ADF_ACCEL_INLINE_CRYPTO) && num_inline_au > 0) {
1597 		device_printf(GET_DEV(accel_dev),
1598 			      "Invalid accel unit config.\n"
1599 			      "Inline feature not supported.\n");
1600 		return EFAULT;
1601 	}
1602 
1603 	hw_data->service_mask = service_mask;
1604 	/* Ensure the configured accel units add up exactly to the number available */
1605 	if (num_au != (num_cy_au + num_dc_au + num_inline_au)) {
1606 		device_printf(GET_DEV(accel_dev),
1607 			      "Invalid accel unit config.\n");
1608 		device_printf(GET_DEV(accel_dev),
1609 			      "Max accel units is %d\n",
1610 			      num_au);
1611 		return EFAULT;
1612 	}
1613 
1614 	/* Ensure user allocates hardware resources for enabled services */
1615 	if (!num_cy_au && (service_mask & ADF_ACCEL_CRYPTO)) {
1616 		device_printf(GET_DEV(accel_dev),
1617 			      "Failed to enable cy service!\n");
1618 		device_printf(GET_DEV(accel_dev),
1619 			      "%s should not be 0\n",
1620 			      ADF_NUM_CY_ACCEL_UNITS);
1621 		return EFAULT;
1622 	}
1623 	if (!num_dc_au && (service_mask & ADF_ACCEL_COMPRESSION)) {
1624 		device_printf(GET_DEV(accel_dev),
1625 			      "Failed to enable dc service!\n");
1626 		device_printf(GET_DEV(accel_dev),
1627 			      "%s should not be 0\n",
1628 			      ADF_NUM_DC_ACCEL_UNITS);
1629 		return EFAULT;
1630 	}
1631 	if (!num_inline_au && (service_mask & ADF_ACCEL_INLINE_CRYPTO)) {
1632 		device_printf(GET_DEV(accel_dev), "Failed to enable");
1633 		device_printf(GET_DEV(accel_dev), " inline service!");
1634 		device_printf(GET_DEV(accel_dev),
1635 			      " %s should not be 0\n",
1636 			      ADF_NUM_INLINE_ACCEL_UNITS);
1637 		return EFAULT;
1638 	}
1639 
1640 	return 0;
1641 }
1642 
1643 static int
1644 get_accel_unit_config(struct adf_accel_dev *accel_dev,
1645 		      u8 *num_cy_au,
1646 		      u8 *num_dc_au,
1647 		      u8 *num_inline_au)
1648 {
1649 	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
1650 	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
1651 
1652 	/* Get the number of accel units allocated for each service */
1653 	snprintf(key, sizeof(key), ADF_NUM_CY_ACCEL_UNITS);
1654 	if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val))
1655 		return EFAULT;
1656 	if (compat_strtou8(val, 10, num_cy_au))
1657 		return EFAULT;
1658 	snprintf(key, sizeof(key), ADF_NUM_DC_ACCEL_UNITS);
1659 	if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val))
1660 		return EFAULT;
1661 	if (compat_strtou8(val, 10, num_dc_au))
1662 		return EFAULT;
1663 
1664 	snprintf(key, sizeof(key), ADF_NUM_INLINE_ACCEL_UNITS);
1665 	if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val))
1666 		return EFAULT;
1667 	if (compat_strtou8(val, 10, num_inline_au))
1668 		return EFAULT;
1669 
1670 	return 0;
1671 }
1672 
1673 /* Function reads the inline ingress/egress configuration
1674  * and returns the number of AEs reserved for ingress
1675  * within the accel units which are allocated for the
1676  * inline service
1677  */
1678 static int
1679 adf_get_inline_config(struct adf_accel_dev *accel_dev, u32 *num_ingress_aes)
1680 {
1681 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
1682 	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
1683 	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
1684 	char *value;
1685 	u32 num_au = hw_data->get_num_accel_units(hw_data);
1686 	unsigned long ingress, egress = 0;
1687 	struct adf_accel_unit *accel_unit = accel_dev->au_info->au;
1688 	u32 num_inline_aes = 0, num_ingress_ae = 0;
1689 	u32 i = 0;
1690 
1691 	snprintf(key, sizeof(key), ADF_INLINE_INGRESS);
1692 	if (adf_cfg_get_param_value(accel_dev, ADF_INLINE_SEC, key, val)) {
1693 		device_printf(GET_DEV(accel_dev), "Failed to find ingress\n");
1694 		return EFAULT;
1695 	}
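	/* The configured value is a percentage; cut the string at the
	 * ADF_C4XXX_PERCENTAGE delimiter before parsing the number.
	 */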
1696 	value = val;
1697 	value = strsep(&value, ADF_C4XXX_PERCENTAGE);
1698 	if (compat_strtoul(value, 10, &ingress))
1699 		return EFAULT;
1700 
1701 	snprintf(key, sizeof(key), ADF_INLINE_EGRESS);
1702 	if (adf_cfg_get_param_value(accel_dev, ADF_INLINE_SEC, key, val)) {
1703 		device_printf(GET_DEV(accel_dev), "Failed to find egress\n");
1704 		return EFAULT;
1705 	}
1706 	value = val;
1707 	value = strsep(&value, ADF_C4XXX_PERCENTAGE);
1708 	if (compat_strtoul(value, 10, &egress))
1709 		return EFAULT;
1710 
1711 	if (ingress + egress != ADF_C4XXX_100) {
1712 		device_printf(GET_DEV(accel_dev),
1713 			      "The sum of ingress and egress should be 100\n");
1714 		return EFAULT;
1715 	}
1716 
1717 	for (i = 0; i < num_au; i++) {
1718 		if (accel_unit[i].services == ADF_ACCEL_INLINE_CRYPTO)
1719 			num_inline_aes += accel_unit[i].num_ae;
1720 	}
1721 
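	/* Translate the ingress percentage into an AE count, rounding
	 * up when the remainder exceeds ADF_C4XXX_ROUND_LIMIT.
	 */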
1722 	num_ingress_ae = num_inline_aes * ingress / ADF_C4XXX_100;
1723 	if (((num_inline_aes * ingress) % ADF_C4XXX_100) >
1724 	    ADF_C4XXX_ROUND_LIMIT)
1725 		num_ingress_ae++;
1726 
1727 	*num_ingress_aes = num_ingress_ae;
1728 	return 0;
1729 }
1730 
1731 static int
1732 adf_set_inline_ae_mask(struct adf_accel_dev *accel_dev)
1733 {
1734 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
1735 	u32 num_au = hw_data->get_num_accel_units(hw_data);
1736 	struct adf_accel_unit_info *au_info = accel_dev->au_info;
1737 	struct adf_accel_unit *accel_unit = accel_dev->au_info->au;
1738 	u32 num_ingress_ae = 0;
1739 	u32 ingress_msk = 0;
1740 	u32 i, j, ae_mask;
1741 
1742 	if (adf_get_inline_config(accel_dev, &num_ingress_ae))
1743 		return EFAULT;
1744 
1745 	for (i = 0; i < num_au; i++) {
1746 		j = 0;
1747 		if (accel_unit[i].services == ADF_ACCEL_INLINE_CRYPTO) {
1748 			/* AEs with inline service enabled are also used
1749 			 * for asymmetric crypto
1750 			 */
1751 			au_info->asym_ae_msk |= accel_unit[i].ae_mask;
1752 			ae_mask = accel_unit[i].ae_mask;
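			/* Reserve the lowest-numbered AEs of this accel
			 * unit for ingress until the requested ingress AE
			 * count is met; the remaining AEs go to egress.
			 */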
1753 			while (num_ingress_ae && ae_mask) {
1754 				if (ae_mask & 1) {
1755 					ingress_msk |= BIT(j);
1756 					num_ingress_ae--;
1757 				}
1758 				ae_mask = ae_mask >> 1;
1759 				j++;
1760 			}
1761 			au_info->inline_ingress_msk |= ingress_msk;
1762 
1763 			au_info->inline_egress_msk |=
1764 			    ~(au_info->inline_ingress_msk) &
1765 			    accel_unit[i].ae_mask;
1766 		}
1767 	}
1768 
1769 	return 0;
1770 }
1771 
1772 static int
1773 adf_set_ae_mask(struct adf_accel_dev *accel_dev)
1774 {
1775 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
1776 	u32 num_au = hw_data->get_num_accel_units(hw_data);
1777 	struct adf_accel_unit_info *au_info = accel_dev->au_info;
1778 	struct adf_accel_unit *accel_unit = accel_dev->au_info->au;
1779 	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
1780 	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
1781 	char *token, *cur_str;
1782 	bool asym_en = false, sym_en = false;
1783 	u32 i;
1784 
1785 	/* Get the services enabled by user */
1786 	snprintf(key, sizeof(key), ADF_SERVICES_ENABLED);
1787 	if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val))
1788 		return EFAULT;
1789 	cur_str = val;
1790 	token = strsep(&cur_str, ADF_SERVICES_SEPARATOR);
1791 	while (token) {
1792 		if (!strncmp(token, ADF_CFG_ASYM, strlen(ADF_CFG_ASYM)))
1793 			asym_en = true;
1794 		if (!strncmp(token, ADF_CFG_SYM, strlen(ADF_CFG_SYM)))
1795 			sym_en = true;
1796 		if (!strncmp(token, ADF_CFG_CY, strlen(ADF_CFG_CY))) {
1797 			sym_en = true;
1798 			asym_en = true;
1799 		}
1800 		token = strsep(&cur_str, ADF_SERVICES_SEPARATOR);
1801 	}
1802 
1803 	for (i = 0; i < num_au; i++) {
1804 		if (accel_unit[i].services == ADF_ACCEL_CRYPTO) {
1805 			/* AEs that support crypto can perform both
1806 			 * symmetric and asymmetric crypto, however
1807 			 * we only enable the threads if the relevant
1808 			 * service is also enabled
1809 			 */
1810 			if (asym_en)
1811 				au_info->asym_ae_msk |= accel_unit[i].ae_mask;
1812 			if (sym_en)
1813 				au_info->sym_ae_msk |= accel_unit[i].ae_mask;
1814 		} else if (accel_unit[i].services == ADF_ACCEL_COMPRESSION) {
1815 			au_info->dc_ae_msk |= accel_unit[i].comp_ae_mask;
1816 		}
1817 	}
1818 	return 0;
1819 }
1820 
1821 static int
1822 adf_init_accel_unit_services(struct adf_accel_dev *accel_dev)
1823 {
1824 	u8 num_cy_au, num_dc_au, num_inline_au;
1825 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
1826 	u32 num_au = hw_data->get_num_accel_units(hw_data);
1827 	struct adf_accel_unit *accel_unit;
1828 	const struct adf_ae_info *ae_info;
1829 	int i;
1830 
1831 	if (get_accel_unit_config(
1832 		accel_dev, &num_cy_au, &num_dc_au, &num_inline_au)) {
1833 		device_printf(GET_DEV(accel_dev), "Invalid accel unit cfg\n");
1834 		return EFAULT;
1835 	}
1836 
1837 	if (check_accel_unit_config(
1838 		accel_dev, num_cy_au, num_dc_au, num_inline_au))
1839 		return EFAULT;
1840 
1841 	accel_dev->au_info = kzalloc(sizeof(*accel_dev->au_info), GFP_KERNEL);
1842 	if (!accel_dev->au_info)
1843 		return ENOMEM;
1844 
1845 	accel_dev->au_info->num_cy_au = num_cy_au;
1846 	accel_dev->au_info->num_dc_au = num_dc_au;
1847 	accel_dev->au_info->num_inline_au = num_inline_au;
1848 
1849 	if (get_ae_info(hw_data, &ae_info)) {
1850 		device_printf(GET_DEV(accel_dev), "Failed to get ae info\n");
1851 		goto err_au_info;
1852 	}
1853 	accel_dev->au_info->ae_info = ae_info;
1854 
1855 	if (get_accel_unit(hw_data, &accel_unit)) {
1856 		device_printf(GET_DEV(accel_dev), "Failed to get accel unit\n");
1857 		goto err_ae_info;
1858 	}
1859 
1860 	/* Enable compression accel units */
1861 	/* Accel units with 4 AEs are reserved for compression first */
1862 	for (i = num_au - 1; i >= 0 && num_dc_au > 0; i--) {
1863 		if (accel_unit[i].num_ae == ADF_C4XXX_4_AE) {
1864 			accel_unit[i].services = ADF_ACCEL_COMPRESSION;
1865 			num_dc_au--;
1866 		}
1867 	}
1868 	for (i = num_au - 1; i >= 0 && num_dc_au > 0; i--) {
1869 		if (accel_unit[i].services == ADF_ACCEL_SERVICE_NULL) {
1870 			accel_unit[i].services = ADF_ACCEL_COMPRESSION;
1871 			num_dc_au--;
1872 		}
1873 	}
1874 
1875 	/* Enable inline accel units */
1876 	for (i = 0; i < num_au && num_inline_au > 0; i++) {
1877 		if (accel_unit[i].services == ADF_ACCEL_SERVICE_NULL) {
1878 			accel_unit[i].services = ADF_ACCEL_INLINE_CRYPTO;
1879 			num_inline_au--;
1880 		}
1881 	}
1882 
1883 	/* Enable crypto accel units */
1884 	for (i = 0; i < num_au && num_cy_au > 0; i++) {
1885 		if (accel_unit[i].services == ADF_ACCEL_SERVICE_NULL) {
1886 			accel_unit[i].services = ADF_ACCEL_CRYPTO;
1887 			num_cy_au--;
1888 		}
1889 	}
1890 	accel_dev->au_info->au = accel_unit;
1891 	return 0;
1892 
1893 err_ae_info:
1894 	accel_dev->au_info->ae_info = NULL;
1895 err_au_info:
1896 	kfree(accel_dev->au_info);
1897 	accel_dev->au_info = NULL;
1898 	return EFAULT;
1899 }
1900 
1901 static void
1902 adf_exit_accel_unit_services(struct adf_accel_dev *accel_dev)
1903 {
1904 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
1905 	u32 num_au = hw_data->get_num_accel_units(hw_data);
1906 	int i;
1907 
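	/* Return all accel units to the unassigned state before
	 * releasing the accel unit info structure.
	 */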
1908 	if (accel_dev->au_info) {
1909 		if (accel_dev->au_info->au) {
1910 			for (i = 0; i < num_au; i++) {
1911 				accel_dev->au_info->au[i].services =
1912 				    ADF_ACCEL_SERVICE_NULL;
1913 			}
1914 		}
1915 		accel_dev->au_info->au = NULL;
1916 		accel_dev->au_info->ae_info = NULL;
1917 		kfree(accel_dev->au_info);
1918 		accel_dev->au_info = NULL;
1919 	}
1920 }
1921 
1922 static inline void
1923 adf_c4xxx_reset_hw_units(struct adf_accel_dev *accel_dev)
1924 {
1925 	struct resource *pmisc =
1926 	    (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr;
1927 
1928 	u32 global_clk_enable = ADF_C4XXX_GLOBAL_CLK_ENABLE_GENERIC_ARAM |
1929 	    ADF_C4XXX_GLOBAL_CLK_ENABLE_GENERIC_ICI_ENABLE |
1930 	    ADF_C4XXX_GLOBAL_CLK_ENABLE_GENERIC_ICE_ENABLE;
1931 
1932 	u32 ixp_reset_generic = ADF_C4XXX_IXP_RESET_GENERIC_ARAM |
1933 	    ADF_C4XXX_IXP_RESET_GENERIC_INLINE_EGRESS |
1934 	    ADF_C4XXX_IXP_RESET_GENERIC_INLINE_INGRESS;
1935 
1936 	/* To properly reset each of the units, the driver must:
1937 	 * 1) Assert the reset-active state using the ixp reset generic
1938 	 *    register;
1939 	 * 2) Disable the generic clock;
1940 	 * 3) Take the device out of reset by clearing the ixp reset
1941 	 *    generic register;
1942 	 * 4) Re-enable the generic clock.
1943 	 */
1944 	ADF_CSR_WR(pmisc, ADF_C4XXX_IXP_RESET_GENERIC, ixp_reset_generic);
1945 	ADF_CSR_WR(pmisc,
1946 		   ADF_C4XXX_GLOBAL_CLK_ENABLE_GENERIC,
1947 		   ADF_C4XXX_GLOBAL_CLK_ENABLE_GENERIC_DISABLE_ALL);
1948 	ADF_CSR_WR(pmisc,
1949 		   ADF_C4XXX_IXP_RESET_GENERIC,
1950 		   ADF_C4XXX_IXP_RESET_GENERIC_OUT_OF_RESET_TRIGGER);
1951 	ADF_CSR_WR(pmisc,
1952 		   ADF_C4XXX_GLOBAL_CLK_ENABLE_GENERIC,
1953 		   global_clk_enable);
1954 }
1955 
1956 static int
1957 adf_init_accel_units(struct adf_accel_dev *accel_dev)
1958 {
1959 	struct resource *csr =
1960 	    (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr;
1961 
1962 	if (adf_init_accel_unit_services(accel_dev))
1963 		return EFAULT;
1964 
1965 	/* Set cy and dc enabled AE masks */
1966 	if (accel_dev->au_info->num_cy_au || accel_dev->au_info->num_dc_au) {
1967 		if (adf_set_ae_mask(accel_dev)) {
1968 			device_printf(GET_DEV(accel_dev),
1969 				      "Failed to set ae masks\n");
1970 			goto err_au;
1971 		}
1972 	}
1973 	/* Set ingress/egress ae mask if inline is enabled */
1974 	if (accel_dev->au_info->num_inline_au) {
1975 		if (adf_set_inline_ae_mask(accel_dev)) {
1976 			device_printf(GET_DEV(accel_dev),
1977 				      "Failed to set inline ae masks\n");
1978 			goto err_au;
1979 		}
1980 	}
1981 	/* Define ARAM regions */
1982 	if (c4xxx_init_aram_config(accel_dev)) {
1983 		device_printf(GET_DEV(accel_dev),
1984 			      "Failed to init aram config\n");
1985 		goto err_au;
1986 	}
1987 	/* Configure h/w registers for inline operations */
1988 	if (accel_dev->au_info->num_inline_au > 0)
1989 		/* Initialise configuration parsing registers */
1990 		if (c4xxx_init_inline_hw(accel_dev))
1991 			goto err_au;
1992 
1993 	c4xxx_set_sadb_size(accel_dev);
1994 
1995 	if (accel_dev->au_info->num_inline_au > 0) {
1996 		/* ici/ice interrupts shall be enabled after msi-x is enabled */
1997 		ADF_CSR_WR(csr,
1998 			   ADF_C4XXX_ERRMSK11,
1999 			   ADF_C4XXX_ERRMSK11_ERR_DISABLE_ICI_ICE_INTR);
2000 		adf_enable_inline_notification(accel_dev);
2001 	}
2002 
2003 	update_hw_capability(accel_dev);
2004 	if (adf_add_debugfs_info(accel_dev)) {
2005 		device_printf(GET_DEV(accel_dev),
2006 			      "Failed to add debug FS information\n");
2007 		goto err_au;
2008 	}
2009 	return 0;
2010 
2011 err_au:
2012 	/* Free and clear accel unit data structures */
2013 	adf_exit_accel_unit_services(accel_dev);
2014 	return EFAULT;
2015 }
2016 
2017 static void
2018 adf_exit_accel_units(struct adf_accel_dev *accel_dev)
2019 {
2020 	adf_exit_accel_unit_services(accel_dev);
2021 	/* Free aram mapping structure */
2022 	c4xxx_exit_aram_config(accel_dev);
2023 	/* Remove entries in debug FS */
2024 	adf_remove_debugfs_info(accel_dev);
2025 }
2026 
2027 static const char *
2028 get_obj_name(struct adf_accel_dev *accel_dev,
2029 	     enum adf_accel_unit_services service)
2030 {
2031 	u32 capabilities = GET_HW_DATA(accel_dev)->accel_capabilities_mask;
2032 	bool sym_only_sku = false;
2033 
2034 	/* Check if SKU is capable only of symmetric cryptography
2035 	 * via device capabilities.
2036 	 */
2037 	if ((capabilities & ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC) &&
2038 	    !(capabilities & ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC) &&
2039 	    !(capabilities & ADF_ACCEL_CAPABILITIES_COMPRESSION))
2040 		sym_only_sku = true;
2041 
2042 	switch (service) {
2043 	case ADF_ACCEL_INLINE_CRYPTO:
2044 		return ADF_C4XXX_INLINE_OBJ;
2045 	case ADF_ACCEL_CRYPTO:
2046 		if (sym_only_sku)
2047 			return ADF_C4XXX_SYM_OBJ;
2048 		else
2049 			return ADF_C4XXX_CY_OBJ;
2050 		break;
2051 	case ADF_ACCEL_COMPRESSION:
2052 		return ADF_C4XXX_DC_OBJ;
2053 	default:
2054 		return NULL;
2055 	}
2056 }
2057 
2058 static uint32_t
2059 get_objs_num(struct adf_accel_dev *accel_dev)
2060 {
2061 	u32 srv = 0;
2062 	u32 max_srv_id = 0;
2063 	unsigned long service_mask = accel_dev->hw_device->service_mask;
2064 
2065 	/* The number of objects corresponds to the number of services */
2066 	for_each_set_bit(srv, &service_mask, ADF_C4XXX_MAX_OBJ)
2067 	{
2068 		max_srv_id = srv;
2069 	}
2070 
2071 	return (max_srv_id + 1);
2072 }
2073 
2074 static uint32_t
2075 get_obj_cfg_ae_mask(struct adf_accel_dev *accel_dev,
2076 		    enum adf_accel_unit_services service)
2077 {
2078 	u32 ae_mask = 0;
2079 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
2080 	u32 num_au = hw_data->get_num_accel_units(hw_data);
2081 	struct adf_accel_unit *accel_unit = accel_dev->au_info->au;
2082 	u32 i = 0;
2083 
2084 	if (service == ADF_ACCEL_SERVICE_NULL)
2085 		return 0;
2086 
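	/* OR together the AE masks of all accel units assigned to the
	 * requested service.
	 */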
2087 	for (i = 0; i < num_au; i++) {
2088 		if (accel_unit[i].services == service)
2089 			ae_mask |= accel_unit[i].ae_mask;
2090 	}
2091 	return ae_mask;
2092 }
2093 
2094 static void
2095 configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
2096 {
2097 	struct resource *addr;
2098 	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
2099 	u32 num_aes = hw_data->get_num_aes(hw_data);
2100 	u32 reg = 0x0;
2101 	u32 i;
2102 
2103 	addr = (&GET_BARS(accel_dev)[ADF_C4XXX_PMISC_BAR])->virt_addr;
2104 
2105 	/* Set/Unset Valid bits in AE Thread to PCIe Function Mapping */
2106 	for (i = 0; i < ADF_C4XXX_AE2FUNC_REG_PER_AE * num_aes; i++) {
2107 		reg = ADF_CSR_RD(addr + ADF_C4XXX_AE2FUNC_MAP_OFFSET,
2108 				 i * ADF_C4XXX_AE2FUNC_MAP_REG_SIZE);
2109 		if (enable)
2110 			reg |= ADF_C4XXX_AE2FUNC_MAP_VALID;
2111 		else
2112 			reg &= ~ADF_C4XXX_AE2FUNC_MAP_VALID;
2113 		ADF_CSR_WR(addr + ADF_C4XXX_AE2FUNC_MAP_OFFSET,
2114 			   i * ADF_C4XXX_AE2FUNC_MAP_REG_SIZE,
2115 			   reg);
2116 	}
2117 }
2118 
2119 void
2120 adf_init_hw_data_c4xxx(struct adf_hw_device_data *hw_data)
2121 {
2122 	hw_data->dev_class = &c4xxx_class;
2123 	hw_data->instance_id = c4xxx_class.instances++;
2124 	hw_data->num_banks = ADF_C4XXX_ETR_MAX_BANKS;
2125 	hw_data->num_rings_per_bank = ADF_C4XXX_NUM_RINGS_PER_BANK;
2126 	hw_data->num_accel = ADF_C4XXX_MAX_ACCELERATORS;
2127 	hw_data->num_engines = ADF_C4XXX_MAX_ACCELENGINES;
2128 	hw_data->num_logical_accel = 1;
2129 	hw_data->tx_rx_gap = ADF_C4XXX_RX_RINGS_OFFSET;
2130 	hw_data->tx_rings_mask = ADF_C4XXX_TX_RINGS_MASK;
2131 	hw_data->alloc_irq = adf_isr_resource_alloc;
2132 	hw_data->free_irq = adf_isr_resource_free;
2133 	hw_data->enable_error_correction = adf_enable_error_correction;
2134 	hw_data->init_ras = adf_init_ras;
2135 	hw_data->exit_ras = adf_exit_ras;
2136 	hw_data->ras_interrupts = adf_ras_interrupts;
2137 	hw_data->get_accel_mask = get_accel_mask;
2138 	hw_data->get_ae_mask = get_ae_mask;
2139 	hw_data->get_num_accels = get_num_accels;
2140 	hw_data->get_num_aes = get_num_aes;
2141 	hw_data->get_num_accel_units = get_num_accel_units;
2142 	hw_data->get_sram_bar_id = get_sram_bar_id;
2143 	hw_data->get_etr_bar_id = get_etr_bar_id;
2144 	hw_data->get_misc_bar_id = get_misc_bar_id;
2145 	hw_data->get_arb_info = get_arb_info;
2146 	hw_data->get_admin_info = get_admin_info;
2147 	hw_data->get_errsou_offset = get_errsou_offset;
2148 	hw_data->get_clock_speed = get_clock_speed;
2149 	hw_data->get_eth_doorbell_msg = get_eth_doorbell_msg;
2150 	hw_data->get_sku = get_sku;
2151 	hw_data->heartbeat_ctr_num = ADF_NUM_THREADS_PER_AE;
2152 	hw_data->check_prod_sku = c4xxx_check_prod_sku;
2153 	hw_data->fw_name = ADF_C4XXX_FW;
2154 	hw_data->fw_mmp_name = ADF_C4XXX_MMP;
2155 	hw_data->get_obj_name = get_obj_name;
2156 	hw_data->get_objs_num = get_objs_num;
2157 	hw_data->get_obj_cfg_ae_mask = get_obj_cfg_ae_mask;
2158 	hw_data->init_admin_comms = adf_init_admin_comms;
2159 	hw_data->exit_admin_comms = adf_exit_admin_comms;
2160 	hw_data->configure_iov_threads = configure_iov_threads;
2161 	hw_data->disable_iov = adf_disable_sriov;
2162 	hw_data->send_admin_init = adf_send_admin_init;
2163 	hw_data->init_arb = adf_init_arb_c4xxx;
2164 	hw_data->exit_arb = adf_exit_arb_c4xxx;
2165 	hw_data->disable_arb = adf_disable_arb;
2166 	hw_data->enable_ints = adf_enable_ints;
2167 	hw_data->set_ssm_wdtimer = c4xxx_set_ssm_wdtimer;
2168 	hw_data->check_slice_hang = c4xxx_check_slice_hang;
2169 	hw_data->reset_device = adf_reset_flr;
2170 	hw_data->restore_device = adf_c4xxx_dev_restore;
2171 	hw_data->init_accel_units = adf_init_accel_units;
2172 	hw_data->reset_hw_units = adf_c4xxx_reset_hw_units;
2173 	hw_data->exit_accel_units = adf_exit_accel_units;
2174 	hw_data->ring_to_svc_map = ADF_DEFAULT_RING_TO_SRV_MAP;
2175 	hw_data->get_heartbeat_status = adf_get_heartbeat_status;
2176 	hw_data->get_ae_clock = get_ae_clock;
2177 	hw_data->clock_frequency = ADF_C4XXX_AE_FREQ;
2178 	hw_data->measure_clock = measure_clock;
2179 	hw_data->add_pke_stats = adf_pke_replay_counters_add_c4xxx;
2180 	hw_data->remove_pke_stats = adf_pke_replay_counters_remove_c4xxx;
2181 	hw_data->add_misc_error = adf_misc_error_add_c4xxx;
2182 	hw_data->remove_misc_error = adf_misc_error_remove_c4xxx;
2183 	hw_data->extended_dc_capabilities = 0;
2184 	hw_data->get_storage_enabled = get_storage_enabled;
2185 	hw_data->query_storage_cap = 0;
2186 	hw_data->get_accel_cap = c4xxx_get_hw_cap;
2187 	hw_data->configure_accel_units = c4xxx_configure_accel_units;
2188 	hw_data->pre_reset = adf_dev_pre_reset;
2189 	hw_data->post_reset = adf_dev_post_reset;
2190 	hw_data->get_ring_to_svc_map = adf_cfg_get_services_enabled;
2191 	hw_data->count_ras_event = adf_fw_count_ras_event;
2192 	hw_data->config_device = adf_config_device;
2193 	hw_data->set_asym_rings_mask = adf_cfg_set_asym_rings_mask;
2194 
2195 	adf_gen2_init_hw_csr_info(&hw_data->csr_info);
2196 	adf_gen2_init_pf_pfvf_ops(&hw_data->csr_info.pfvf_ops);
2197 	hw_data->csr_info.arb_enable_mask = 0xF;
2198 }
2199 
2200 void
2201 adf_clean_hw_data_c4xxx(struct adf_hw_device_data *hw_data)
2202 {
2203 	hw_data->dev_class->instances--;
2204 }
2205 
2206 void
2207 remove_oid(struct adf_accel_dev *accel_dev, struct sysctl_oid *oid)
2208 {
2209 	struct sysctl_ctx_list *qat_sysctl_ctx;
2210 	int ret;
2211 
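	/* Remove the entry from the device sysctl context, then delete
	 * the oid itself.
	 */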
2212 	qat_sysctl_ctx =
2213 	    device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev);
2214 
2215 	ret = sysctl_ctx_entry_del(qat_sysctl_ctx, oid);
2216 	if (ret)
2217 		device_printf(GET_DEV(accel_dev), "Failed to delete entry\n");
2218 
2219 	ret = sysctl_remove_oid(oid, 1, 1);
2220 	if (ret)
2221 		device_printf(GET_DEV(accel_dev), "Failed to delete oid\n");
2222 }
2223