1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /* Copyright(c) 2007-2022 Intel Corporation */
3 #include <adf_accel_devices.h>
4 #include <adf_cfg.h>
5 #include <adf_common_drv.h>
6 #include <adf_gen4vf_hw_csr_data.h>
7 #include <adf_gen4_pfvf.h>
8 #include <adf_pfvf_vf_msg.h>
9 #include "adf_4xxxvf_hw_data.h"
10 #include "icp_qat_hw.h"
11 #include "adf_transport_internal.h"
12 #include "adf_pfvf_vf_proto.h"
13
/* Device class descriptor for all 4xxx virtual functions; the shared
 * `instances` counter is maintained by init/clean below. */
static struct adf_hw_device_class adf_4xxxiov_class =
    { .name = ADF_4XXXVF_DEVICE_NAME, .type = DEV_4XXXVF, .instances = 0 };
16
/*
 * Ring-pair-to-service maps. Each VF exposes four ring pairs; every map
 * packs one service selector per pair, shifted into its pair's field.
 */

/* Default layout: asym/sym interleaved across the four ring pairs. */
#define ADF_4XXXIOV_DEFAULT_RING_TO_SRV_MAP                                    \
	(ASYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT |                        \
	 ASYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT |                              \
	 SYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

/* "cy" / "asym;sym" / "sym;asym" all resolve to the default layout. */
#define ADF_4XXXIOV_ASYM_SYM ADF_4XXXIOV_DEFAULT_RING_TO_SRV_MAP

/* All four ring pairs dedicated to compression. */
#define ADF_4XXXIOV_DC                                                         \
	(COMP | COMP << ADF_CFG_SERV_RING_PAIR_1_SHIFT |                       \
	 COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT |                              \
	 COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

/* All four ring pairs dedicated to symmetric crypto. */
#define ADF_4XXXIOV_SYM                                                        \
	(SYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT |                         \
	 SYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT |                               \
	 SYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

/* All four ring pairs dedicated to asymmetric crypto. */
#define ADF_4XXXIOV_ASYM                                                       \
	(ASYM | ASYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT |                       \
	 ASYM << ADF_CFG_SERV_RING_PAIR_2_SHIFT |                              \
	 ASYM << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

/* Pairs 0-1 asymmetric crypto, pairs 2-3 compression. */
#define ADF_4XXXIOV_ASYM_DC                                                    \
	(ASYM | ASYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT |                       \
	 COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT |                              \
	 COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

/* Pairs 0-1 symmetric crypto, pairs 2-3 compression. */
#define ADF_4XXXIOV_SYM_DC                                                     \
	(SYM | SYM << ADF_CFG_SERV_RING_PAIR_1_SHIFT |                         \
	 COMP << ADF_CFG_SERV_RING_PAIR_2_SHIFT |                              \
	 COMP << ADF_CFG_SERV_RING_PAIR_3_SHIFT)

/* No service assigned to any ring pair. */
#define ADF_4XXXIOV_NA                                                         \
	(NA | NA << ADF_CFG_SERV_RING_PAIR_1_SHIFT |                           \
	 NA << ADF_CFG_SERV_RING_PAIR_2_SHIFT |                                \
	 NA << ADF_CFG_SERV_RING_PAIR_3_SHIFT)
53
/* Pairing of a user-visible "ServicesEnabled" string with the ring-pair
 * service mask it selects. */
struct adf_enabled_services {
	const char svcs_enabled[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
	u16 rng_to_svc_msk;
};

/* Lookup table consumed by get_ring_to_svc_map(); order-insensitive
 * spellings ("dc;asym" vs "asym;dc") map to the same mask. */
static struct adf_enabled_services adf_4xxxiov_svcs[] =
    { { "dc", ADF_4XXXIOV_DC },
      { "sym", ADF_4XXXIOV_SYM },
      { "asym", ADF_4XXXIOV_ASYM },
      { "dc;asym", ADF_4XXXIOV_ASYM_DC },
      { "asym;dc", ADF_4XXXIOV_ASYM_DC },
      { "sym;dc", ADF_4XXXIOV_SYM_DC },
      { "dc;sym", ADF_4XXXIOV_SYM_DC },
      { "asym;sym", ADF_4XXXIOV_ASYM_SYM },
      { "sym;asym", ADF_4XXXIOV_ASYM_SYM },
      { "cy", ADF_4XXXIOV_ASYM_SYM } };
70
/* Return the fixed accelerator mask for this VF (no fusing on VFs). */
static u32
get_accel_mask(struct adf_accel_dev *accel_dev)
{
	return ADF_4XXXIOV_ACCELERATORS_MASK;
}
76
/* Return the fixed acceleration-engine mask for this VF. */
static u32
get_ae_mask(struct adf_accel_dev *accel_dev)
{
	return ADF_4XXXIOV_ACCELENGINES_MASK;
}
82
/* Number of accelerators visible to a 4xxx VF. */
static u32
get_num_accels(struct adf_hw_device_data *self)
{
	return ADF_4XXXIOV_MAX_ACCELERATORS;
}
88
/* Number of acceleration engines visible to a 4xxx VF. */
static u32
get_num_aes(struct adf_hw_device_data *self)
{
	return ADF_4XXXIOV_MAX_ACCELENGINES;
}
94
/* PCI BAR index holding the miscellaneous (PMISC) CSRs. */
static u32
get_misc_bar_id(struct adf_hw_device_data *self)
{
	return ADF_4XXXIOV_PMISC_BAR;
}
100
/* PCI BAR index holding the transport (ETR) ring CSRs. */
static u32
get_etr_bar_id(struct adf_hw_device_data *self)
{
	return ADF_4XXXIOV_ETR_BAR;
}
106
/* Derive the CPP clock from the stored high-speed clock frequency. */
static u32
get_clock_speed(struct adf_hw_device_data *self)
{
	/* CPP clock is half high-speed clock */
	return self->clock_frequency / 2;
}
113
/* All virtual functions report the VF SKU. */
static enum dev_sku_info
get_sku(struct adf_hw_device_data *self)
{
	return DEV_SKU_VF;
}
119
/* No-op returning success; fills hw_device ops a VF does not implement
 * (admin comms, arbiter init). */
static int
adf_vf_int_noop(struct adf_accel_dev *accel_dev)
{
	return 0;
}
125
/* Void no-op counterpart of adf_vf_int_noop for void-typed hw ops. */
static void
adf_vf_void_noop(struct adf_accel_dev *accel_dev)
{
}
130
131 u32
adf_4xxxvf_get_hw_cap(struct adf_accel_dev * accel_dev)132 adf_4xxxvf_get_hw_cap(struct adf_accel_dev *accel_dev)
133 {
134 device_t pdev = accel_dev->accel_pci_dev.pci_dev;
135 u32 vffusectl1;
136 u32 capabilities;
137
138 capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC +
139 ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC +
140 ICP_ACCEL_CAPABILITIES_CIPHER +
141 ICP_ACCEL_CAPABILITIES_AUTHENTICATION +
142 ICP_ACCEL_CAPABILITIES_COMPRESSION +
143 ICP_ACCEL_CAPABILITIES_SHA3_EXT + ICP_ACCEL_CAPABILITIES_SM2 +
144 ICP_ACCEL_CAPABILITIES_SM3 + ICP_ACCEL_CAPABILITIES_SM4 +
145 ICP_ACCEL_CAPABILITIES_CHACHA_POLY +
146 ICP_ACCEL_CAPABILITIES_AESGCM_SPC +
147 ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64 +
148 ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION +
149 ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION;
150
151 /* Get fused capabilities */
152 vffusectl1 = pci_read_config(pdev, ADF_4XXXIOV_VFFUSECTL1_OFFSET, 4);
153
154 if (vffusectl1 & BIT(7)) {
155 capabilities &=
156 ~(ICP_ACCEL_CAPABILITIES_SM3 + ICP_ACCEL_CAPABILITIES_SM4);
157 }
158 if (vffusectl1 & BIT(6)) {
159 capabilities &= ~ICP_ACCEL_CAPABILITIES_SM3;
160 }
161 if (vffusectl1 & BIT(3)) {
162 capabilities &= ~(ICP_ACCEL_CAPABILITIES_COMPRESSION +
163 ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64);
164 }
165 if (vffusectl1 & BIT(2)) {
166 capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
167 }
168 if (vffusectl1 & BIT(1)) {
169 capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
170 }
171 if (vffusectl1 & BIT(0)) {
172 capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
173 }
174 return capabilities;
175 }
176
/* Apply the default asymmetric-crypto ring mask for this device. */
static void
adf_set_asym_rings_mask(struct adf_accel_dev *accel_dev)
{
	accel_dev->hw_device->asym_rings_mask = ADF_4XXX_DEF_ASYM_MASK;
}
182
183 static void
enable_pf2vm_interrupt(struct adf_accel_dev * accel_dev)184 enable_pf2vm_interrupt(struct adf_accel_dev *accel_dev)
185 {
186 struct adf_hw_device_data *hw_data;
187 struct adf_bar *pmisc;
188 struct resource *pmisc_bar_addr;
189
190 hw_data = accel_dev->hw_device;
191 pmisc = &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
192 pmisc_bar_addr = pmisc->virt_addr;
193
194 ADF_CSR_WR(pmisc_bar_addr, ADF_4XXXIOV_VINTMSKPF2VM_OFFSET, 0x0);
195 }
196
197 static void
disable_pf2vm_interrupt(struct adf_accel_dev * accel_dev)198 disable_pf2vm_interrupt(struct adf_accel_dev *accel_dev)
199 {
200 struct adf_hw_device_data *hw_data;
201 struct adf_bar *pmisc;
202 struct resource *pmisc_bar_addr;
203
204 hw_data = accel_dev->hw_device;
205 pmisc = &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
206 pmisc_bar_addr = pmisc->virt_addr;
207
208 ADF_CSR_WR(pmisc_bar_addr, ADF_4XXXIOV_VINTMSKPF2VM_OFFSET, BIT(0));
209 }
210
211 static int
interrupt_active_pf2vm(struct adf_accel_dev * accel_dev)212 interrupt_active_pf2vm(struct adf_accel_dev *accel_dev)
213 {
214 struct adf_hw_device_data *hw_data;
215 struct adf_bar *pmisc;
216 struct resource *pmisc_bar_addr;
217 u32 v_sou, v_msk;
218
219 hw_data = accel_dev->hw_device;
220 pmisc = &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
221 pmisc_bar_addr = pmisc->virt_addr;
222
223 v_sou = ADF_CSR_RD(pmisc_bar_addr, ADF_4XXXIOV_VINTSOUPF2VM_OFFSET);
224 v_msk = ADF_CSR_RD(pmisc_bar_addr, ADF_4XXXIOV_VINTMSKPF2VM_OFFSET);
225
226 return ((v_sou & ~v_msk) & BIT(0)) ? 1 : 0;
227 }
228
229 static int
get_int_active_bundles(struct adf_accel_dev * accel_dev)230 get_int_active_bundles(struct adf_accel_dev *accel_dev)
231 {
232 struct adf_hw_device_data *hw_data;
233 struct adf_bar *pmisc;
234 struct resource *pmisc_bar_addr;
235 u32 v_sou, v_msk;
236
237 hw_data = accel_dev->hw_device;
238 pmisc = &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
239 pmisc_bar_addr = pmisc->virt_addr;
240
241 v_sou = ADF_CSR_RD(pmisc_bar_addr, ADF_4XXXIOV_VINTSOU_OFFSET);
242 v_msk = ADF_CSR_RD(pmisc_bar_addr, ADF_4XXXIOV_VINTMSK_OFFSET);
243
244 return v_sou & ~v_msk & 0xF;
245 }
246
247 static void
get_ring_svc_map_data(int ring_pair_index,u16 ring_to_svc_map,u8 * serv_type,int * ring_index,int * num_rings_per_srv,int bank_num)248 get_ring_svc_map_data(int ring_pair_index,
249 u16 ring_to_svc_map,
250 u8 *serv_type,
251 int *ring_index,
252 int *num_rings_per_srv,
253 int bank_num)
254 {
255 *serv_type =
256 GET_SRV_TYPE(ring_to_svc_map, bank_num % ADF_CFG_NUM_SERVICES);
257 *ring_index = 0;
258 *num_rings_per_srv = ADF_4XXXIOV_NUM_RINGS_PER_BANK / 2;
259 }
260
261 static int
get_ring_to_svc_map(struct adf_accel_dev * accel_dev,u16 * ring_to_svc_map)262 get_ring_to_svc_map(struct adf_accel_dev *accel_dev, u16 *ring_to_svc_map)
263 {
264 char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
265 char val[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
266 u32 i = 0;
267
268 /* Get the services enabled by user if provided.
269 * The function itself will also be called during the driver probe
270 * procedure where no ServicesEnable is provided. Then the device
271 * should still start with default configuration without
272 * ServicesEnable. Hence it still returns 0 when the
273 * adf_cfg_get_param_value() function returns failure.
274 */
275 snprintf(key, sizeof(key), ADF_SERVICES_ENABLED);
276 if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, key, val))
277 return 0;
278
279 for (i = 0; i < ARRAY_SIZE(adf_4xxxiov_svcs); i++) {
280 if (!strncmp(val,
281 adf_4xxxiov_svcs[i].svcs_enabled,
282 ADF_CFG_MAX_KEY_LEN_IN_BYTES)) {
283 *ring_to_svc_map = adf_4xxxiov_svcs[i].rng_to_svc_msk;
284 return 0;
285 }
286 }
287
288 device_printf(GET_DEV(accel_dev),
289 "Invalid services enabled: %s\n",
290 val);
291 return EFAULT;
292 }
293
/*
 * Ask the PF to reset one of this VF's ring pairs (banks).
 *
 * Sends an RP_RESET VF2PF message and waits (up to the PFVF response
 * timeout) for the PF's reply, which the PF2VF handler records in
 * rpreset_sts before signalling msg_received.
 *
 * Returns 0 on success; EINVAL for an out-of-range bank; EFAULT if the
 * message cannot be sent, the PF times out, or the PF reports failure.
 *
 * NOTE(review): the completion must be (re)initialized and rpreset_sts
 * preset to RPRESET_SUCCESS *before* the message is sent, all under
 * rpreset_lock, so a fast PF reply cannot race the setup — keep this
 * statement order intact.
 */
static int
adf_4xxxvf_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number)
{
	struct pfvf_message req = { 0 };
	unsigned long timeout = msecs_to_jiffies(ADF_PFVF_MSG_RESP_TIMEOUT);
	int ret = 0;

	if (bank_number >= accel_dev->hw_device->num_banks)
		return EINVAL;

	req.type = ADF_VF2PF_MSGTYPE_RP_RESET;
	req.data = bank_number;
	mutex_lock(&accel_dev->u1.vf.rpreset_lock);
	init_completion(&accel_dev->u1.vf.msg_received);
	accel_dev->u1.vf.rpreset_sts = RPRESET_SUCCESS;
	if (adf_send_vf2pf_msg(accel_dev, req)) {
		device_printf(GET_DEV(accel_dev),
			      "vf ring pair reset failure (vf2pf msg error)\n");
		ret = EFAULT;
		goto out;
	}
	if (!wait_for_completion_timeout(&accel_dev->u1.vf.msg_received,
					 timeout)) {
		device_printf(
		    GET_DEV(accel_dev),
		    "vf ring pair reset failure (pf2vf msg timeout)\n");
		ret = EFAULT;
		goto out;
	}
	if (accel_dev->u1.vf.rpreset_sts != RPRESET_SUCCESS) {
		device_printf(
		    GET_DEV(accel_dev),
		    "vf ring pair reset failure (pf reports error)\n");
		ret = EFAULT;
		goto out;
	}

out:
	mutex_unlock(&accel_dev->u1.vf.rpreset_lock);
	return ret;
}
335
/*
 * Populate the hw_device_data ops table for a 4xxx VF.
 *
 * Wires device constants, the getter callbacks defined above, VF no-ops
 * for PF-only duties (error correction, admin comms, arbiter), the PFVF
 * interrupt/messaging hooks, and the generation-4 CSR and PFVF protocol
 * ops. Also bumps the class instance counter; adf_clean_hw_data_4xxxiov()
 * is its required counterpart.
 */
void
adf_init_hw_data_4xxxiov(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class = &adf_4xxxiov_class;
	hw_data->num_banks = ADF_4XXXIOV_ETR_MAX_BANKS;
	hw_data->num_rings_per_bank = ADF_4XXXIOV_NUM_RINGS_PER_BANK;
	hw_data->num_accel = ADF_4XXXIOV_MAX_ACCELERATORS;
	hw_data->num_logical_accel = 1;
	hw_data->num_engines = ADF_4XXXIOV_MAX_ACCELENGINES;
	hw_data->tx_rx_gap = ADF_4XXXIOV_RX_RINGS_OFFSET;
	hw_data->tx_rings_mask = ADF_4XXXIOV_TX_RINGS_MASK;
	hw_data->ring_to_svc_map = ADF_4XXXIOV_DEFAULT_RING_TO_SRV_MAP;
	hw_data->alloc_irq = adf_vf_isr_resource_alloc;
	hw_data->free_irq = adf_vf_isr_resource_free;
	hw_data->enable_error_correction = adf_vf_void_noop;
	hw_data->init_admin_comms = adf_vf_int_noop;
	hw_data->exit_admin_comms = adf_vf_void_noop;
	hw_data->send_admin_init = adf_vf2pf_notify_init;
	hw_data->init_arb = adf_vf_int_noop;
	hw_data->exit_arb = adf_vf_void_noop;
	hw_data->disable_iov = adf_vf2pf_notify_shutdown;
	hw_data->get_accel_mask = get_accel_mask;
	hw_data->get_ae_mask = get_ae_mask;
	hw_data->get_num_accels = get_num_accels;
	hw_data->get_num_aes = get_num_aes;
	hw_data->get_etr_bar_id = get_etr_bar_id;
	hw_data->get_misc_bar_id = get_misc_bar_id;
	hw_data->get_clock_speed = get_clock_speed;
	hw_data->get_sku = get_sku;
	hw_data->enable_ints = adf_vf_void_noop;
	hw_data->reset_device = adf_reset_flr;
	hw_data->restore_device = adf_dev_restore;
	hw_data->get_ring_svc_map_data = get_ring_svc_map_data;
	hw_data->get_ring_to_svc_map = get_ring_to_svc_map;
	hw_data->get_accel_cap = adf_4xxxvf_get_hw_cap;
	hw_data->config_device = adf_config_device;
	hw_data->set_asym_rings_mask = adf_set_asym_rings_mask;
	hw_data->ring_pair_reset = adf_4xxxvf_ring_pair_reset;
	hw_data->enable_pf2vf_interrupt = enable_pf2vm_interrupt;
	hw_data->disable_pf2vf_interrupt = disable_pf2vm_interrupt;
	hw_data->interrupt_active_pf2vf = interrupt_active_pf2vm;
	hw_data->get_int_active_bundles = get_int_active_bundles;
	hw_data->dev_class->instances++;
	adf_devmgr_update_class_index(hw_data);
	gen4vf_init_hw_csr_info(&hw_data->csr_info);
	adf_gen4_init_vf_pfvf_ops(&hw_data->csr_info.pfvf_ops);
}
383
/* Undo adf_init_hw_data_4xxxiov(): drop the class instance count and
 * refresh the device-manager class index. */
void
adf_clean_hw_data_4xxxiov(struct adf_hw_device_data *hw_data)
{
	hw_data->dev_class->instances--;
	adf_devmgr_update_class_index(hw_data);
}
390