/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2025 Intel Corporation */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_accel_devices.h"
#include "adf_4xxx_hw_data.h"
#include "adf_gen4_hw_data.h"
#include "adf_fw_counters.h"
#include "adf_cfg_device.h"
#include "adf_dbgfs.h"
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <machine/bus_dma.h>
#include <dev/pci/pcireg.h>

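/*
 * Malloc tag for 4xxx-specific allocations; the hw_device data allocated in
 * adf_attach() is freed back to this tag in adf_cleanup_accel().
 */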
static MALLOC_DEFINE(M_QAT_4XXX, "qat_4xxx", "qat_4xxx");

#define ADF_SYSTEM_DEVICE(device_id) \
        { \
                PCI_VENDOR_ID_INTEL, device_id \
        }

static const struct pci_device_id adf_pci_tbl[] = {
        ADF_SYSTEM_DEVICE(ADF_4XXX_PCI_DEVICE_ID),
        ADF_SYSTEM_DEVICE(ADF_401XX_PCI_DEVICE_ID),
        { 0, },
};

static int
adf_probe(device_t dev)
{
        const struct pci_device_id *id;

        for (id = adf_pci_tbl; id->vendor != 0; id++) {
                if (pci_get_vendor(dev) == id->vendor &&
                    pci_get_device(dev) == id->device) {
                        device_set_desc(dev,
                            "Intel " ADF_4XXX_DEVICE_NAME " QuickAssist");
                        return BUS_PROBE_GENERIC;
                }
        }
        return ENXIO;
}

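/*
 * Optional "disable_safe_dc_mode" sysctl: lets an administrator toggle QAT
 * safe data compression mode, but only while the device is in the "down"
 * state (the handler rejects changes once the device has been started).
 */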
#ifdef QAT_DISABLE_SAFE_DC_MODE
static int adf_4xxx_sysctl_disable_safe_dc_mode(SYSCTL_HANDLER_ARGS)
{
        struct adf_accel_dev *accel_dev = arg1;
        int error, value = accel_dev->disable_safe_dc_mode;

        error = sysctl_handle_int(oidp, &value, 0, req);
        if (error || !req->newptr)
                return error;

        if (value != 1 && value != 0)
                return EINVAL;

        if (adf_dev_started(accel_dev)) {
                device_printf(GET_DEV(accel_dev),
                    "QAT: configuration can only be changed in \"down\" device state\n");
                return EBUSY;
        }

        accel_dev->disable_safe_dc_mode = (u8)value;

        return 0;
}

static void
adf_4xxx_disable_safe_dc_sysctl_add(struct adf_accel_dev *accel_dev)
{
        struct sysctl_ctx_list *qat_sysctl_ctx;
        struct sysctl_oid *qat_sysctl_tree;

        qat_sysctl_ctx =
            device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev);
        qat_sysctl_tree =
            device_get_sysctl_tree(accel_dev->accel_pci_dev.pci_dev);
        accel_dev->safe_dc_mode =
            SYSCTL_ADD_OID(qat_sysctl_ctx,
                SYSCTL_CHILDREN(qat_sysctl_tree),
                OID_AUTO,
                "disable_safe_dc_mode",
                CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_TUN | CTLFLAG_SKIP,
                accel_dev,
                0,
                adf_4xxx_sysctl_disable_safe_dc_mode,
                "LU",
                "Disable QAT safe data compression mode");
}

static void
adf_4xxx_disable_safe_dc_sysctl_remove(struct adf_accel_dev *accel_dev)
{
        int ret;
        struct sysctl_ctx_list *qat_sysctl_ctx =
            device_get_sysctl_ctx(accel_dev->accel_pci_dev.pci_dev);

        ret = sysctl_ctx_entry_del(qat_sysctl_ctx, accel_dev->safe_dc_mode);
        if (ret) {
                device_printf(GET_DEV(accel_dev), "Failed to delete entry\n");
        } else {
                ret = sysctl_remove_oid(accel_dev->safe_dc_mode, 1, 1);
                if (ret)
                        device_printf(GET_DEV(accel_dev),
                            "Failed to delete oid\n");
        }
}
#endif /* QAT_DISABLE_SAFE_DC_MODE */

static void
adf_cleanup_accel(struct adf_accel_dev *accel_dev)
{
        struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
        int i;

        if (accel_dev->dma_tag)
                bus_dma_tag_destroy(accel_dev->dma_tag);
        for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
                struct adf_bar *bar = &accel_pci_dev->pci_bars[i];

                if (bar->virt_addr)
                        bus_free_resource(accel_pci_dev->pci_dev,
                            SYS_RES_MEMORY,
                            bar->virt_addr);
        }

        if (accel_dev->hw_device) {
                switch (pci_get_device(accel_pci_dev->pci_dev)) {
                case ADF_4XXX_PCI_DEVICE_ID:
                case ADF_401XX_PCI_DEVICE_ID:
                        adf_clean_hw_data_4xxx(accel_dev->hw_device);
                        break;
                default:
                        break;
                }
                free(accel_dev->hw_device, M_QAT_4XXX);
                accel_dev->hw_device = NULL;
        }
#ifdef QAT_DISABLE_SAFE_DC_MODE
        adf_4xxx_disable_safe_dc_sysctl_remove(accel_dev);
#endif /* QAT_DISABLE_SAFE_DC_MODE */
        adf_dbgfs_exit(accel_dev);
        adf_cfg_dev_remove(accel_dev);
        adf_devmgr_rm_dev(accel_dev, NULL);
}

static int
adf_attach(device_t dev)
{
        struct adf_accel_dev *accel_dev;
        struct adf_accel_pci *accel_pci_dev;
        struct adf_hw_device_data *hw_data;
        unsigned int bar_nr;
        int ret = 0, rid;
        struct adf_cfg_device *cfg_dev = NULL;

        /*
         * Set the PCI MaxPayload size to 512 bytes. This works around PCI
         * passthrough resetting MaxPayload to 128 bytes when the device is
         * reset.
         */
        if (pci_get_max_payload(dev) != 512)
                pci_set_max_payload(dev, 512);

        accel_dev = device_get_softc(dev);

        mutex_init(&accel_dev->lock);
        INIT_LIST_HEAD(&accel_dev->crypto_list);
        accel_pci_dev = &accel_dev->accel_pci_dev;
        accel_pci_dev->pci_dev = dev;

        if (bus_get_domain(dev, &accel_pci_dev->node) != 0)
                accel_pci_dev->node = 0;

        /*
         * Add the accel device to the accel table. This must be done before
         * adf_cleanup_accel() can be called.
         */
        ret = adf_devmgr_add_dev(accel_dev, NULL);
        if (ret) {
                device_printf(dev, "Failed to add new accelerator device.\n");
                goto out_err_lock;
        }

        /* Allocate and configure the device configuration structure */
        hw_data = malloc(sizeof(*hw_data), M_QAT_4XXX, M_WAITOK | M_ZERO);

        accel_dev->hw_device = hw_data;
        adf_init_hw_data_4xxx(accel_dev->hw_device, pci_get_device(dev));
        accel_pci_dev->revid = pci_get_revid(dev);
        hw_data->fuses = pci_read_config(dev, ADF_4XXX_FUSECTL4_OFFSET, 4);

        /* Read and store the PPAERUCM values */
        ret = adf_aer_store_ppaerucm_reg(dev, hw_data);
        if (ret)
                goto out_err;

        /* Get the accelerator and acceleration engine masks */
        hw_data->accel_mask = hw_data->get_accel_mask(accel_dev);
        hw_data->ae_mask = hw_data->get_ae_mask(accel_dev);

        accel_pci_dev->sku = hw_data->get_sku(hw_data);
        /* If the device has no acceleration engines, ignore it. */
        if (!hw_data->accel_mask || !hw_data->ae_mask ||
            (~hw_data->ae_mask & 0x01)) {
                device_printf(dev, "No acceleration units found\n");
                ret = ENXIO;
                goto out_err;
        }

        /* Create the device configuration table */
        ret = adf_cfg_dev_add(accel_dev);
        if (ret)
                goto out_err;
        ret = adf_clock_debugfs_add(accel_dev);
        if (ret)
                goto out_err;

#ifdef QAT_DISABLE_SAFE_DC_MODE
        adf_4xxx_disable_safe_dc_sysctl_add(accel_dev);
#endif /* QAT_DISABLE_SAFE_DC_MODE */

        pci_set_max_read_req(dev, 4096);

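        /*
         * Create the parent DMA tag for the device: byte alignment, no
         * boundary restriction, and the full bus address range.
         */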
        ret = bus_dma_tag_create(bus_get_dma_tag(dev),
            1,
            0,
            BUS_SPACE_MAXADDR,
            BUS_SPACE_MAXADDR,
            NULL,
            NULL,
            BUS_SPACE_MAXSIZE,
            /* BUS_SPACE_UNRESTRICTED */ 1,
            BUS_SPACE_MAXSIZE,
            0,
            NULL,
            NULL,
            &accel_dev->dma_tag);
        if (ret)
                goto out_err;

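        /* Cache the acceleration capabilities reported by the hardware. */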
        if (hw_data->get_accel_cap) {
                hw_data->accel_capabilities_mask =
                    hw_data->get_accel_cap(accel_dev);
        }

        /*
         * Find and map all of the device's BARs. Logical BAR layout for
         * 64-bit BARs:
         *   BARs 0 and 1 - logical BAR0
         *   BARs 2 and 3 - logical BAR1
         *   BARs 4 and 5 - logical BAR3
         */
        for (bar_nr = 0;
             bar_nr < (ADF_PCI_MAX_BARS * 2) && bar_nr < PCIR_MAX_BAR_0;
             bar_nr += 2) {
                struct adf_bar *bar;

                rid = PCIR_BAR(bar_nr);
                bar = &accel_pci_dev->pci_bars[bar_nr / 2];

                bar->virt_addr = bus_alloc_resource_any(dev,
                    SYS_RES_MEMORY,
                    &rid,
                    RF_ACTIVE);
                if (!bar->virt_addr) {
                        device_printf(dev, "Failed to map BAR %d\n", bar_nr);
                        ret = ENXIO;
                        goto out_err;
                }
                bar->base_addr = rman_get_start(bar->virt_addr);
                bar->size = rman_get_size(bar->virt_addr);
        }
        ret = pci_enable_busmaster(dev);
        if (ret)
                goto out_err;

        adf_dbgfs_init(accel_dev);

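        /* The config_device callback is required before init/start. */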
        if (!accel_dev->hw_device->config_device) {
                ret = EFAULT;
                goto out_err_disable;
        }

        ret = accel_dev->hw_device->config_device(accel_dev);
        if (ret)
                goto out_err_disable;

        ret = adf_dev_init(accel_dev);
        if (ret)
                goto out_dev_shutdown;

        ret = adf_dev_start(accel_dev);
        if (ret)
                goto out_dev_stop;

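        /*
         * The staged configuration is no longer needed once the device has
         * started; clear and free it.
         */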
        cfg_dev = accel_dev->cfg->dev;
        adf_cfg_device_clear(cfg_dev, accel_dev);
        free(cfg_dev, M_QAT);
        accel_dev->cfg->dev = NULL;
        return ret;
out_dev_stop:
        adf_dev_stop(accel_dev);
out_dev_shutdown:
        adf_dev_shutdown(accel_dev);
out_err_disable:
        pci_disable_busmaster(dev);
out_err:
        adf_cleanup_accel(accel_dev);
out_err_lock:
        mutex_destroy(&accel_dev->lock);

        return ret;
}

static int
adf_detach(device_t dev)
{
        struct adf_accel_dev *accel_dev = device_get_softc(dev);

        if (adf_dev_stop(accel_dev)) {
                device_printf(dev, "Failed to stop QAT accel dev\n");
                return EBUSY;
        }

        adf_dev_shutdown(accel_dev);

        pci_disable_busmaster(dev);
        adf_cleanup_accel(accel_dev);
        mutex_destroy(&accel_dev->lock);

        return 0;
}

static device_method_t adf_methods[] = {
        DEVMETHOD(device_probe, adf_probe),
        DEVMETHOD(device_attach, adf_attach),
        DEVMETHOD(device_detach, adf_detach),

        DEVMETHOD_END
};

static driver_t adf_driver = {
        "qat",
        adf_methods,
        sizeof(struct adf_accel_dev)
};

DRIVER_MODULE_ORDERED(qat_4xxx, pci, adf_driver, NULL, NULL, SI_ORDER_THIRD);
MODULE_VERSION(qat_4xxx, 1);
MODULE_DEPEND(qat_4xxx, qat_common, 1, 1, 1);
MODULE_DEPEND(qat_4xxx, qat_api, 1, 1, 1);
MODULE_DEPEND(qat_4xxx, linuxkpi, 1, 1, 1);