/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
#include <adf_pfvf_msg.h>
#include <adf_dev_err.h>
#include <adf_gen2_hw_data.h>
#include <adf_gen2_pfvf.h>
#include "adf_c62x_hw_data.h"
#include "icp_qat_hw.h"
#include "adf_cfg.h"
#include "adf_heartbeat.h"

/* Worker thread to service arbiter mappings */
static const u32 thrd_to_arb_map[ADF_C62X_MAX_ACCELENGINES] = {
	0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA,
	0x11222AAA, 0x12222AAA, 0x11222AAA, 0x12222AAA, 0x11222AAA
};

enum { DEV_C62X_SKU_1 = 0, DEV_C62X_SKU_2 = 1 };

static u32 thrd_to_arb_map_gen[ADF_C62X_MAX_ACCELENGINES] = { 0 };

static struct adf_hw_device_class c62x_class = {
	.name = ADF_C62X_DEVICE_NAME,
	.type = DEV_C62X,
	.instances = 0
};

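/* Build the accelerator (SSM) enable mask from the fuse and soft-strap CSRs. */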
static u32
get_accel_mask(struct adf_accel_dev *accel_dev)
{
	device_t pdev = accel_dev->accel_pci_dev.pci_dev;
	u32 fuse;
	u32 straps;

	fuse = pci_read_config(pdev, ADF_DEVICE_FUSECTL_OFFSET, 4);
	straps = pci_read_config(pdev, ADF_C62X_SOFTSTRAP_CSR_OFFSET, 4);

	return (~(fuse | straps)) >> ADF_C62X_ACCELERATORS_REG_OFFSET &
	    ADF_C62X_ACCELERATORS_MASK;
}

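/*
 * Build the accel engine enable mask from the fuse and soft-strap CSRs.
 * Each SSM that is disabled also disables its pair of accel engines.
 */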
static u32
get_ae_mask(struct adf_accel_dev *accel_dev)
{
	device_t pdev = accel_dev->accel_pci_dev.pci_dev;
	u32 fuse;
	u32 me_straps;
	u32 me_disable;
	u32 ssms_disabled;

	fuse = pci_read_config(pdev, ADF_DEVICE_FUSECTL_OFFSET, 4);
	me_straps = pci_read_config(pdev, ADF_C62X_SOFTSTRAP_CSR_OFFSET, 4);

	/* If SSMs are disabled, then disable the corresponding MEs */
	ssms_disabled =
	    (~get_accel_mask(accel_dev)) & ADF_C62X_ACCELERATORS_MASK;
	me_disable = 0x3;
	while (ssms_disabled) {
		if (ssms_disabled & 1)
			me_straps |= me_disable;
		ssms_disabled >>= 1;
		me_disable <<= 2;
	}

	return (~(fuse | me_straps)) & ADF_C62X_ACCELENGINES_MASK;
}

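/* Count the accelerators enabled in accel_mask. */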
static u32
get_num_accels(struct adf_hw_device_data *self)
{
	u32 i, ctr = 0;

	if (!self || !self->accel_mask)
		return 0;

	for (i = 0; i < ADF_C62X_MAX_ACCELERATORS; i++) {
		if (self->accel_mask & (1 << i))
			ctr++;
	}
	return ctr;
}

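/* Count the accel engines enabled in ae_mask. */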
static u32
get_num_aes(struct adf_hw_device_data *self)
{
	u32 i, ctr = 0;

	if (!self || !self->ae_mask)
		return 0;

	for (i = 0; i < ADF_C62X_MAX_ACCELENGINES; i++) {
		if (self->ae_mask & (1 << i))
			ctr++;
	}
	return ctr;
}

static u32
get_misc_bar_id(struct adf_hw_device_data *self)
{
	return ADF_C62X_PMISC_BAR;
}

static u32
get_etr_bar_id(struct adf_hw_device_data *self)
{
	return ADF_C62X_ETR_BAR;
}

static u32
get_sram_bar_id(struct adf_hw_device_data *self)
{
	return ADF_C62X_SRAM_BAR;
}

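/* Infer the device SKU from the number of enabled accel engines. */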
static enum dev_sku_info
get_sku(struct adf_hw_device_data *self)
{
	int aes = get_num_aes(self);

	if (aes == 8)
		return DEV_SKU_2;
	else if (aes == 10)
		return DEV_SKU_4;

	return DEV_SKU_UNKNOWN;
}

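/*
 * Build the per-device thread-to-arbiter mapping: copy the static table
 * entry for each enabled accel engine and leave entries for fused-off
 * engines zeroed.
 */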
static void
adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
			u32 const **arb_map_config)
{
	int i;
	struct adf_hw_device_data *hw_device = accel_dev->hw_device;

	for (i = 0; i < ADF_C62X_MAX_ACCELENGINES; i++) {
		thrd_to_arb_map_gen[i] = 0;
		if (hw_device->ae_mask & (1 << i))
			thrd_to_arb_map_gen[i] = thrd_to_arb_map[i];
	}
	adf_cfg_gen_dispatch_arbiter(accel_dev,
				     thrd_to_arb_map,
				     thrd_to_arb_map_gen,
				     ADF_C62X_MAX_ACCELENGINES);
	*arb_map_config = thrd_to_arb_map_gen;
}

static void
get_arb_info(struct arb_info *arb_csrs_info)
{
	arb_csrs_info->arbiter_offset = ADF_C62X_ARB_OFFSET;
	arb_csrs_info->wrk_thd_2_srv_arb_map =
	    ADF_C62X_ARB_WRK_2_SER_MAP_OFFSET;
	arb_csrs_info->wrk_cfg_offset = ADF_C62X_ARB_WQCFG_OFFSET;
}

static void
get_admin_info(struct admin_info *admin_csrs_info)
{
	admin_csrs_info->mailbox_offset = ADF_C62X_MAILBOX_BASE_OFFSET;
	admin_csrs_info->admin_msg_ur = ADF_C62X_ADMINMSGUR_OFFSET;
	admin_csrs_info->admin_msg_lr = ADF_C62X_ADMINMSGLR_OFFSET;
}

static void
get_errsou_offset(u32 *errsou3, u32 *errsou5)
{
	*errsou3 = ADF_C62X_ERRSOU3;
	*errsou5 = ADF_C62X_ERRSOU5;
}

static u32
get_clock_speed(struct adf_hw_device_data *self)
{
	/* CPP clock is half high-speed clock */
	return self->clock_frequency / 2;
}

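/*
 * Enable ECC/parity correction on every enabled accel engine and shared
 * memory (SSM) error detection and correction on every enabled accelerator.
 */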
static void
adf_enable_error_correction(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_device = accel_dev->hw_device;
	struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_C62X_PMISC_BAR];
	struct resource *csr = misc_bar->virt_addr;
	unsigned int val, i;
	unsigned int mask;

	/* Enable Accel Engine error detection & correction */
	mask = hw_device->ae_mask;
	for (i = 0; mask; i++, mask >>= 1) {
		if (!(mask & 1))
			continue;
		val = ADF_CSR_RD(csr, ADF_C62X_AE_CTX_ENABLES(i));
		val |= ADF_C62X_ENABLE_AE_ECC_ERR;
		ADF_CSR_WR(csr, ADF_C62X_AE_CTX_ENABLES(i), val);
		val = ADF_CSR_RD(csr, ADF_C62X_AE_MISC_CONTROL(i));
		val |= ADF_C62X_ENABLE_AE_ECC_PARITY_CORR;
		ADF_CSR_WR(csr, ADF_C62X_AE_MISC_CONTROL(i), val);
	}

	/* Enable shared memory error detection & correction */
	mask = hw_device->accel_mask;
	for (i = 0; mask; i++, mask >>= 1) {
		if (!(mask & 1))
			continue;
		val = ADF_CSR_RD(csr, ADF_C62X_UERRSSMSH(i));
		val |= ADF_C62X_ERRSSMSH_EN;
		ADF_CSR_WR(csr, ADF_C62X_UERRSSMSH(i), val);
		val = ADF_CSR_RD(csr, ADF_C62X_CERRSSMSH(i));
		val |= ADF_C62X_ERRSSMSH_EN;
		ADF_CSR_WR(csr, ADF_C62X_CERRSSMSH(i), val);
	}
}

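/* Unmask bundle and miscellaneous interrupts. */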
static void
adf_enable_ints(struct adf_accel_dev *accel_dev)
{
	struct resource *addr;

	addr = (&GET_BARS(accel_dev)[ADF_C62X_PMISC_BAR])->virt_addr;

	/* Enable bundle and misc interrupts */
	ADF_CSR_WR(addr, ADF_C62X_SMIAPF0_MASK_OFFSET, ADF_C62X_SMIA0_MASK);
	ADF_CSR_WR(addr, ADF_C62X_SMIAPF1_MASK_OFFSET, ADF_C62X_SMIA1_MASK);
}

static u32
get_ae_clock(struct adf_hw_device_data *self)
{
	/*
	 * Clock update interval is <16> ticks for c62x.
	 */
	return self->clock_frequency / 16;
}

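/*
 * Read the storage-firmware-enabled setting from the device configuration;
 * *storage_enabled is left untouched when the key is absent.
 */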
static int
get_storage_enabled(struct adf_accel_dev *accel_dev, uint32_t *storage_enabled)
{
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];

	strlcpy(key, ADF_STORAGE_FIRMWARE_ENABLED, sizeof(key));
	if (!adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val)) {
		if (kstrtouint(val, 0, storage_enabled))
			return -EFAULT;
	}
	return 0;
}

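/* Measure the AE clock frequency and cache it in the hw_device data. */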
static int
measure_clock(struct adf_accel_dev *accel_dev)
{
	u32 frequency;
	int ret = 0;

	ret = adf_dev_measure_clock(accel_dev,
				    &frequency,
				    ADF_C62X_MIN_AE_FREQ,
				    ADF_C62X_MAX_AE_FREQ);
	if (ret)
		return ret;

	accel_dev->hw_device->clock_frequency = frequency;
	return 0;
}

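/*
 * Compute the capability mask: start from the full C62x feature set, then
 * clear capabilities disabled by the legacy fuses and by the PKE/DC
 * power-gate bits in the soft straps or fuses.
 */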
static u32
c62x_get_hw_cap(struct adf_accel_dev *accel_dev)
{
	device_t pdev = accel_dev->accel_pci_dev.pci_dev;
	u32 legfuses;
	u32 capabilities;
	u32 straps;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 fuses = hw_data->fuses;

	/* Read accelerator capabilities mask */
	legfuses = pci_read_config(pdev, ADF_DEVICE_LEGFUSE_OFFSET, 4);

	capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC +
	    ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC +
	    ICP_ACCEL_CAPABILITIES_CIPHER +
	    ICP_ACCEL_CAPABILITIES_AUTHENTICATION +
	    ICP_ACCEL_CAPABILITIES_COMPRESSION + ICP_ACCEL_CAPABILITIES_ZUC +
	    ICP_ACCEL_CAPABILITIES_SHA3 + ICP_ACCEL_CAPABILITIES_HKDF +
	    ICP_ACCEL_CAPABILITIES_ECEDMONT +
	    ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN;
	if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE)
		capabilities &= ~(ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
		    ICP_ACCEL_CAPABILITIES_CIPHER |
		    ICP_ACCEL_CAPABILITIES_HKDF |
		    ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN);
	if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE)
		capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
	if (legfuses & ICP_ACCEL_MASK_PKE_SLICE)
		capabilities &= ~(ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
		    ICP_ACCEL_CAPABILITIES_ECEDMONT);
	if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE)
		capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
	if (legfuses & ICP_ACCEL_MASK_EIA3_SLICE)
		capabilities &= ~ICP_ACCEL_CAPABILITIES_ZUC;
	if (legfuses & ICP_ACCEL_MASK_SHA3_SLICE)
		capabilities &= ~ICP_ACCEL_CAPABILITIES_SHA3;

	straps = pci_read_config(pdev, ADF_C62X_SOFTSTRAP_CSR_OFFSET, 4);
	if ((straps | fuses) & ADF_C62X_POWERGATE_PKE)
		capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
	if ((straps | fuses) & ADF_C62X_POWERGATE_DC)
		capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;

	return capabilities;
}

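/* A single firmware object covers all services on all enabled accel engines. */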
static const char *
get_obj_name(struct adf_accel_dev *accel_dev,
	     enum adf_accel_unit_services service)
{
	return ADF_CXXX_AE_FW_NAME_CUSTOM1;
}

static uint32_t
get_objs_num(struct adf_accel_dev *accel_dev)
{
	return 1;
}

static uint32_t
get_obj_cfg_ae_mask(struct adf_accel_dev *accel_dev,
		    enum adf_accel_unit_services services)
{
	return accel_dev->hw_device->ae_mask;
}

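/*
 * Fill in the C62x-specific ops and constants on adf_hw_device_data.  As a
 * rough, illustrative sketch (surrounding names such as M_QAT are assumptions,
 * not taken from this file), a probe path could use it like this:
 *
 *	struct adf_hw_device_data *hw_data;
 *
 *	hw_data = malloc(sizeof(*hw_data), M_QAT, M_WAITOK | M_ZERO);
 *	adf_init_hw_data_c62x(hw_data);
 *	accel_dev->hw_device = hw_data;
 *	...
 *	adf_clean_hw_data_c62x(hw_data);
 *	free(hw_data, M_QAT);
 */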
void
adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class = &c62x_class;
	hw_data->instance_id = c62x_class.instances++;
	hw_data->num_banks = ADF_C62X_ETR_MAX_BANKS;
	hw_data->num_rings_per_bank = ADF_ETR_MAX_RINGS_PER_BANK;
	hw_data->num_accel = ADF_C62X_MAX_ACCELERATORS;
	hw_data->num_logical_accel = 1;
	hw_data->num_engines = ADF_C62X_MAX_ACCELENGINES;
	hw_data->tx_rx_gap = ADF_C62X_RX_RINGS_OFFSET;
	hw_data->tx_rings_mask = ADF_C62X_TX_RINGS_MASK;
	hw_data->alloc_irq = adf_isr_resource_alloc;
	hw_data->free_irq = adf_isr_resource_free;
	hw_data->enable_error_correction = adf_enable_error_correction;
	hw_data->print_err_registers = adf_print_err_registers;
	hw_data->get_accel_mask = get_accel_mask;
	hw_data->get_ae_mask = get_ae_mask;
	hw_data->get_num_accels = get_num_accels;
	hw_data->get_num_aes = get_num_aes;
	hw_data->get_sram_bar_id = get_sram_bar_id;
	hw_data->get_etr_bar_id = get_etr_bar_id;
	hw_data->get_misc_bar_id = get_misc_bar_id;
	hw_data->get_arb_info = get_arb_info;
	hw_data->get_admin_info = get_admin_info;
	hw_data->get_errsou_offset = get_errsou_offset;
	hw_data->get_clock_speed = get_clock_speed;
	hw_data->get_sku = get_sku;
	hw_data->heartbeat_ctr_num = ADF_NUM_HB_CNT_PER_AE;
	hw_data->fw_name = ADF_C62X_FW;
	hw_data->fw_mmp_name = ADF_C62X_MMP;
	hw_data->init_admin_comms = adf_init_admin_comms;
	hw_data->exit_admin_comms = adf_exit_admin_comms;
	hw_data->disable_iov = adf_disable_sriov;
	hw_data->send_admin_init = adf_send_admin_init;
	hw_data->init_arb = adf_init_gen2_arb;
	hw_data->exit_arb = adf_exit_arb;
	hw_data->get_arb_mapping = adf_get_arbiter_mapping;
	hw_data->enable_ints = adf_enable_ints;
	hw_data->set_ssm_wdtimer = adf_set_ssm_wdtimer;
	hw_data->check_slice_hang = adf_check_slice_hang;
	hw_data->restore_device = adf_dev_restore;
	hw_data->reset_device = adf_reset_flr;
	hw_data->get_objs_num = get_objs_num;
	hw_data->get_obj_name = get_obj_name;
	hw_data->get_obj_cfg_ae_mask = get_obj_cfg_ae_mask;
	hw_data->clock_frequency = ADF_C62X_AE_FREQ;
	hw_data->measure_clock = measure_clock;
	hw_data->get_ae_clock = get_ae_clock;
	hw_data->get_accel_cap = c62x_get_hw_cap;
	hw_data->extended_dc_capabilities = 0;
	hw_data->get_storage_enabled = get_storage_enabled;
	hw_data->query_storage_cap = 1;
	hw_data->get_heartbeat_status = adf_get_heartbeat_status;
	hw_data->storage_enable = 0;
	hw_data->get_fw_image_type = adf_cfg_get_fw_image_type;
	hw_data->config_device = adf_config_device;
	hw_data->get_ring_to_svc_map = adf_cfg_get_services_enabled;
	hw_data->set_asym_rings_mask = adf_cfg_set_asym_rings_mask;
	hw_data->ring_to_svc_map = ADF_DEFAULT_RING_TO_SRV_MAP;
	hw_data->pre_reset = adf_dev_pre_reset;
	hw_data->post_reset = adf_dev_post_reset;

	adf_gen2_init_hw_csr_info(&hw_data->csr_info);
	adf_gen2_init_pf_pfvf_ops(&hw_data->csr_info.pfvf_ops);
}

void
adf_clean_hw_data_c62x(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class->instances--;
}