/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2025 Intel Corporation */
#include <adf_accel_devices.h>
#include <adf_cfg.h>
#include <adf_common_drv.h>
#include <adf_gen4vf_hw_csr_data.h>
#include <adf_gen4_pfvf.h>
#include <adf_pfvf_vf_msg.h>
#include "adf_4xxxvf_hw_data.h"
#include "icp_qat_hw.h"
#include "adf_transport_internal.h"
#include "adf_pfvf_vf_proto.h"

static struct adf_hw_device_class adf_4xxxiov_class =
    { .name = ADF_4XXXVF_DEVICE_NAME, .type = DEV_4XXXVF, .instances = 0 };

#define ADF_4XXXIOV_DEFAULT_RING_TO_SRV_MAP \
	(ASYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
	 ASYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
	 SYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

#define ADF_4XXXIOV_ASYM_SYM ADF_4XXXIOV_DEFAULT_RING_TO_SRV_MAP

#define ADF_4XXXIOV_DC \
	(COMP | COMP << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
	 COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
	 COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

#define ADF_4XXXIOV_SYM \
	(SYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
	 SYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
	 SYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

#define ADF_4XXXIOV_ASYM \
	(ASYM | ASYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
	 ASYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
	 ASYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

#define ADF_4XXXIOV_ASYM_DC \
	(ASYM | ASYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
	 COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
	 COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

#define ADF_4XXXIOV_SYM_DC \
	(SYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
	 COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
	 COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

#define ADF_4XXXIOV_NA \
	(NA | NA << ADF_CFG_SERV_RING_PAIR_1_SHIFT | \
	 NA << ADF_CFG_SERV_RING_PAIR_2_SHIFT | \
	 NA << ADF_CFG_SERV_RING_PAIR_3_SHIFT)
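
/*
 * Each ring pair has its own field in the ring-to-service map; the
 * ADF_CFG_SERV_RING_PAIR_n_SHIFT constants position the service value for
 * ring pair n.  For example, the default map above assigns asym, sym, asym,
 * sym to ring pairs 0-3, while ADF_4XXXIOV_DC assigns compression to all
 * four.
 */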

struct adf_enabled_services {
	const char svcs_enabled[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	u16 rng_to_svc_msk;
};

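/*
 * Recognized ServicesEnabled strings and the ring-to-service maps they
 * select.  Both orderings of each two-service combination are listed so a
 * user-supplied value matches regardless of order; "cy" selects the combined
 * sym+asym crypto layout.
 */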
static struct adf_enabled_services adf_4xxxiov_svcs[] =
    { { "dc", ADF_4XXXIOV_DC },
      { "sym", ADF_4XXXIOV_SYM },
      { "asym", ADF_4XXXIOV_ASYM },
      { "dc;asym", ADF_4XXXIOV_ASYM_DC },
      { "asym;dc", ADF_4XXXIOV_ASYM_DC },
      { "sym;dc", ADF_4XXXIOV_SYM_DC },
      { "dc;sym", ADF_4XXXIOV_SYM_DC },
      { "asym;sym", ADF_4XXXIOV_ASYM_SYM },
      { "sym;asym", ADF_4XXXIOV_ASYM_SYM },
      { "cy", ADF_4XXXIOV_ASYM_SYM } };

static u32
get_accel_mask(struct adf_accel_dev *accel_dev)
{
	return ADF_4XXXIOV_ACCELERATORS_MASK;
}

static u32
get_ae_mask(struct adf_accel_dev *accel_dev)
{
	return ADF_4XXXIOV_ACCELENGINES_MASK;
}

static u32
get_num_accels(struct adf_hw_device_data *self)
{
	return ADF_4XXXIOV_MAX_ACCELERATORS;
}

static u32
get_num_aes(struct adf_hw_device_data *self)
{
	return ADF_4XXXIOV_MAX_ACCELENGINES;
}

static u32
get_misc_bar_id(struct adf_hw_device_data *self)
{
	return ADF_4XXXIOV_PMISC_BAR;
}

static u32
get_etr_bar_id(struct adf_hw_device_data *self)
{
	return ADF_4XXXIOV_ETR_BAR;
}

static u32
get_clock_speed(struct adf_hw_device_data *self)
{
	/* The CPP clock runs at half the high-speed clock frequency */
	return self->clock_frequency / 2;
}

static enum dev_sku_info
get_sku(struct adf_hw_device_data *self)
{
	return DEV_SKU_VF;
}

static int
adf_vf_int_noop(struct adf_accel_dev *accel_dev)
{
	return 0;
}

static void
adf_vf_void_noop(struct adf_accel_dev *accel_dev)
{
}

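/*
 * Derive the VF's capability mask from the VFFUSECTL1 fuse register: start
 * from the full feature set and strip the capabilities whose slices are
 * fused off in this SKU.
 */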
u32
adf_4xxxvf_get_hw_cap(struct adf_accel_dev *accel_dev)
{
	device_t pdev = accel_dev->accel_pci_dev.pci_dev;
	u32 vffusectl1;
	u32 capabilities_sym, capabilities_sym_cipher, capabilities_sym_auth,
	    capabilities_asym, capabilities_dc;

	/* Get fused capabilities */
	vffusectl1 = pci_read_config(pdev, ADF_4XXXIOV_VFFUSECTL1_OFFSET, 4);

	capabilities_sym_cipher = ICP_ACCEL_CAPABILITIES_HKDF |
	    ICP_ACCEL_CAPABILITIES_SM4 | ICP_ACCEL_CAPABILITIES_CHACHA_POLY |
	    ICP_ACCEL_CAPABILITIES_AESGCM_SPC | ICP_ACCEL_CAPABILITIES_AES_V2;
	capabilities_sym_auth = ICP_ACCEL_CAPABILITIES_SM3 |
	    ICP_ACCEL_CAPABILITIES_SHA3 | ICP_ACCEL_CAPABILITIES_SHA3_EXT;

	/* A set bit in vffusectl1 means the feature is OFF in this SKU */
	if (vffusectl1 & ICP_ACCEL_4XXXVF_MASK_CIPHER_SLICE) {
		capabilities_sym_cipher &= ~ICP_ACCEL_CAPABILITIES_HKDF;
		capabilities_sym_cipher &= ~ICP_ACCEL_CAPABILITIES_SM4;
	}

	if (vffusectl1 & ICP_ACCEL_4XXXVF_MASK_UCS_SLICE) {
		capabilities_sym_cipher &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY;
		capabilities_sym_cipher &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC;
		capabilities_sym_cipher &= ~ICP_ACCEL_CAPABILITIES_AES_V2;
	}

	if (vffusectl1 & ICP_ACCEL_4XXXVF_MASK_AUTH_SLICE) {
		capabilities_sym_auth &= ~ICP_ACCEL_CAPABILITIES_SM3;
		capabilities_sym_auth &= ~ICP_ACCEL_CAPABILITIES_SHA3;
		capabilities_sym_auth &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT;
	}

	if (vffusectl1 & ICP_ACCEL_4XXXVF_MASK_SMX_SLICE) {
		capabilities_sym_cipher &= ~ICP_ACCEL_CAPABILITIES_SM4;
		capabilities_sym_auth &= ~ICP_ACCEL_CAPABILITIES_SM3;
	}

	if (capabilities_sym_cipher)
		capabilities_sym_cipher |= ICP_ACCEL_CAPABILITIES_CIPHER;

	if (capabilities_sym_auth)
		capabilities_sym_auth |= ICP_ACCEL_CAPABILITIES_AUTHENTICATION;

	capabilities_sym = capabilities_sym_cipher | capabilities_sym_auth;

	if (capabilities_sym)
		capabilities_sym |= ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;

	capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
	    ICP_ACCEL_CAPABILITIES_SM2 | ICP_ACCEL_CAPABILITIES_ECEDMONT;

	if (vffusectl1 & ICP_ACCEL_4XXXVF_MASK_PKE_SLICE) {
		capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
		capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2;
		capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT;
	}

	capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION |
	    ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION |
	    ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION |
	    ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;

	if (vffusectl1 & ICP_ACCEL_4XXXVF_MASK_COMPRESS_SLICE) {
		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;
		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION;
		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION;
		capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64;
	}

	return capabilities_sym | capabilities_dc | capabilities_asym;
}
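
/*
 * Illustrative sketch only (not part of the driver): a caller could gate
 * symmetric-crypto setup on the capability mask reported above roughly as
 * follows.  The helper name is hypothetical.
 */
#if 0
static bool
adf_4xxxvf_supports_sym_example(struct adf_accel_dev *accel_dev)
{
	u32 caps = adf_4xxxvf_get_hw_cap(accel_dev);

	/* Symmetric crypto is usable only if the slices are not fused off */
	return (caps & ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC) != 0;
}
#endif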

static void
adf_set_asym_rings_mask(struct adf_accel_dev *accel_dev)
{
	accel_dev->hw_device->asym_rings_mask = ADF_4XXX_DEF_ASYM_MASK;
}

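/*
 * VINTMSKPF2VM bit 0 masks the PF-to-VM interrupt: clearing the register
 * unmasks it, setting BIT(0) masks it.
 */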
static void
enable_pf2vm_interrupt(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data;
	struct adf_bar *pmisc;
	struct resource *pmisc_bar_addr;

	hw_data = accel_dev->hw_device;
	pmisc = &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	pmisc_bar_addr = pmisc->virt_addr;

	ADF_CSR_WR(pmisc_bar_addr, ADF_4XXXIOV_VINTMSKPF2VM_OFFSET, 0x0);
}

static void
disable_pf2vm_interrupt(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data;
	struct adf_bar *pmisc;
	struct resource *pmisc_bar_addr;

	hw_data = accel_dev->hw_device;
	pmisc = &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	pmisc_bar_addr = pmisc->virt_addr;

	ADF_CSR_WR(pmisc_bar_addr, ADF_4XXXIOV_VINTMSKPF2VM_OFFSET, BIT(0));
}

static int
interrupt_active_pf2vm(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data;
	struct adf_bar *pmisc;
	struct resource *pmisc_bar_addr;
	u32 v_sou, v_msk;

	hw_data = accel_dev->hw_device;
	pmisc = &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	pmisc_bar_addr = pmisc->virt_addr;

	v_sou = ADF_CSR_RD(pmisc_bar_addr, ADF_4XXXIOV_VINTSOUPF2VM_OFFSET);
	v_msk = ADF_CSR_RD(pmisc_bar_addr, ADF_4XXXIOV_VINTMSKPF2VM_OFFSET);

	return ((v_sou & ~v_msk) & BIT(0)) ? 1 : 0;
}

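/*
 * Return a bitmask of the VF's bundles (banks) that have a pending,
 * unmasked ring interrupt.
 */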
static int
get_int_active_bundles(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data;
	struct adf_bar *pmisc;
	struct resource *pmisc_bar_addr;
	u32 v_sou, v_msk;

	hw_data = accel_dev->hw_device;
	pmisc = &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
	pmisc_bar_addr = pmisc->virt_addr;

	v_sou = ADF_CSR_RD(pmisc_bar_addr, ADF_4XXXIOV_VINTSOU_OFFSET);
	v_msk = ADF_CSR_RD(pmisc_bar_addr, ADF_4XXXIOV_VINTMSK_OFFSET);

	return v_sou & ~v_msk & 0xF;
}

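/*
 * Each bank serves a single service: the service type cycles through the
 * ring-to-service map with the bank number, and the rings in the bank are
 * split in half between the request (tx) and response (rx) sides.
 */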
static void
get_ring_svc_map_data(int ring_pair_index,
		      u16 ring_to_svc_map,
		      u8 *serv_type,
		      int *ring_index,
		      int *num_rings_per_srv,
		      int bank_num)
{
	*serv_type =
	    GET_SRV_TYPE(ring_to_svc_map, bank_num % ADF_CFG_NUM_SERVICES);
	*ring_index = 0;
	*num_rings_per_srv = ADF_4XXXIOV_NUM_RINGS_PER_BANK / 2;
}

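/*
 * Resolve the ring-to-service map from the ServicesEnabled configuration
 * entry in the general section, falling back to the default map when no
 * entry is present.  A hypothetical configuration selecting the combined
 * crypto layout might contain, for example:
 *
 *	[GENERAL]
 *	ServicesEnabled = sym;asym
 */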
static int
get_ring_to_svc_map(struct adf_accel_dev *accel_dev, u16 *ring_to_svc_map)
{
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	u32 i = 0;

	if (accel_dev->hw_device->get_ring_to_svc_done)
		return 0;

	/* Get the services enabled by the user, if provided.
	 * This function is also called during driver probe, before any
	 * ServicesEnabled entry exists; the device should still start with
	 * the default configuration in that case, so return 0 when
	 * adf_cfg_get_param_value() fails.
	 */
	snprintf(key, sizeof(key), ADF_SERVICES_ENABLED);
	if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val))
		return 0;

	for (i = 0; i < ARRAY_SIZE(adf_4xxxiov_svcs); i++) {
		if (!strncmp(val,
			     adf_4xxxiov_svcs[i].svcs_enabled,
			     ADF_CFG_MAX_KEY_LEN_IN_BYTES)) {
			*ring_to_svc_map = adf_4xxxiov_svcs[i].rng_to_svc_msk;
			return 0;
		}
	}

	device_printf(GET_DEV(accel_dev),
		      "Invalid services enabled: %s\n",
		      val);
	return EFAULT;
}

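/*
 * Ask the PF to reset one of this VF's ring pairs: send an RP_RESET
 * message over the VF2PF channel, wait for the PF's response and check
 * the reported status.
 */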
static int
adf_4xxxvf_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
{
	struct pfvf_message req = { 0 };
	unsigned long timeout = msecs_to_jiffies(ADF_PFVF_MSG_RESP_TIMEOUT);
	int ret = 0;

	if (bank_number >= accel_dev->hw_device->num_banks)
		return EINVAL;

	req.type = ADF_VF2PF_MSGTYPE_RP_RESET;
	req.data = bank_number;
	mutex_lock(&accel_dev->u1.vf.rpreset_lock);
	init_completion(&accel_dev->u1.vf.msg_received);
	accel_dev->u1.vf.rpreset_sts = RPRESET_SUCCESS;
	if (adf_send_vf2pf_msg(accel_dev, req)) {
		device_printf(GET_DEV(accel_dev),
			      "vf ring pair reset failure (vf2pf msg error)\n");
		ret = EFAULT;
		goto out;
	}
	if (!wait_for_completion_timeout(&accel_dev->u1.vf.msg_received,
					 timeout)) {
		device_printf(
		    GET_DEV(accel_dev),
		    "vf ring pair reset failure (pf2vf msg timeout)\n");
		ret = EFAULT;
		goto out;
	}
	if (accel_dev->u1.vf.rpreset_sts != RPRESET_SUCCESS) {
		device_printf(
		    GET_DEV(accel_dev),
		    "vf ring pair reset failure (pf reports error)\n");
		ret = EFAULT;
		goto out;
	}

out:
	mutex_unlock(&accel_dev->u1.vf.rpreset_lock);
	return ret;
}

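/*
 * Populate the hw_device_data ops and parameters for a 4xxx virtual
 * function.  PF-only operations (admin comms, arbitration, error
 * correction) are stubbed out with no-ops; interaction with the PF goes
 * through the gen4 PFVF message channel.
 */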
void
adf_init_hw_data_4xxxiov(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class = &adf_4xxxiov_class;
	hw_data->num_banks = ADF_4XXXIOV_ETR_MAX_BANKS;
	hw_data->num_rings_per_bank = ADF_4XXXIOV_NUM_RINGS_PER_BANK;
	hw_data->num_accel = ADF_4XXXIOV_MAX_ACCELERATORS;
	hw_data->num_logical_accel = 1;
	hw_data->num_engines = ADF_4XXXIOV_MAX_ACCELENGINES;
	hw_data->tx_rx_gap = ADF_4XXXIOV_RX_RINGS_OFFSET;
	hw_data->tx_rings_mask = ADF_4XXXIOV_TX_RINGS_MASK;
	hw_data->ring_to_svc_map = ADF_4XXXIOV_DEFAULT_RING_TO_SRV_MAP;
	hw_data->alloc_irq = adf_vf_isr_resource_alloc;
	hw_data->free_irq = adf_vf_isr_resource_free;
	hw_data->enable_error_correction = adf_vf_void_noop;
	hw_data->init_admin_comms = adf_vf_int_noop;
	hw_data->exit_admin_comms = adf_vf_void_noop;
	hw_data->send_admin_init = adf_vf2pf_notify_init;
	hw_data->init_arb = adf_vf_int_noop;
	hw_data->exit_arb = adf_vf_void_noop;
	hw_data->disable_iov = adf_vf2pf_notify_shutdown;
	hw_data->get_accel_mask = get_accel_mask;
	hw_data->get_ae_mask = get_ae_mask;
	hw_data->get_num_accels = get_num_accels;
	hw_data->get_num_aes = get_num_aes;
	hw_data->get_etr_bar_id = get_etr_bar_id;
	hw_data->get_misc_bar_id = get_misc_bar_id;
	hw_data->get_clock_speed = get_clock_speed;
	hw_data->get_sku = get_sku;
	hw_data->enable_ints = adf_vf_void_noop;
	hw_data->reset_device = adf_reset_flr;
	hw_data->restore_device = adf_dev_restore;
	hw_data->get_ring_svc_map_data = get_ring_svc_map_data;
	hw_data->get_ring_to_svc_map = get_ring_to_svc_map;
	hw_data->get_accel_cap = adf_4xxxvf_get_hw_cap;
	hw_data->config_device = adf_config_device;
	hw_data->set_asym_rings_mask = adf_set_asym_rings_mask;
	hw_data->ring_pair_reset = adf_4xxxvf_ring_pair_reset;
	hw_data->enable_pf2vf_interrupt = enable_pf2vm_interrupt;
	hw_data->disable_pf2vf_interrupt = disable_pf2vm_interrupt;
	hw_data->interrupt_active_pf2vf = interrupt_active_pf2vm;
	hw_data->get_int_active_bundles = get_int_active_bundles;
	hw_data->dev_class->instances++;
	adf_devmgr_update_class_index(hw_data);
	gen4vf_init_hw_csr_info(&hw_data->csr_info);
	adf_gen4_init_vf_pfvf_ops(&hw_data->csr_info.pfvf_ops);
}

void
adf_clean_hw_data_4xxxiov(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class->instances--;
	adf_devmgr_update_class_index(hw_data);
}