1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /* Copyright(c) 2007-2022 Intel Corporation */
3 #include "qat_freebsd.h"
4 #include "adf_cfg.h"
5 #include "adf_common_drv.h"
6 #include "adf_accel_devices.h"
7 #include "icp_qat_uclo.h"
8 #include "icp_qat_fw.h"
9 #include "icp_qat_fw_init_admin.h"
10 #include "adf_cfg_strings.h"
11 #include "adf_uio_control.h"
12 #include "adf_uio_cleanup.h"
13 #include "adf_uio.h"
14 #include "adf_transport_access_macros.h"
15 #include "adf_transport_internal.h"
16 #include <sys/bus.h>
17 #include <sys/lock.h>
18 #include <sys/kernel.h>
19 #include <sys/module.h>
20 #include <sys/sx.h>
21 #include <sys/malloc.h>
22 #include <machine/atomic.h>
23 #include <dev/pci/pcivar.h>
24 #include <sys/conf.h>
25 #include <sys/systm.h>
26 #include <sys/queue.h>
27 #include <sys/proc.h>
28 #include <sys/types.h>
29 #include <sys/priv.h>
30 #include <linux/list.h>
31 #include "adf_accel_devices.h"
32 #include "adf_common_drv.h"
33 #include "adf_cfg.h"
34 #include "adf_cfg_common.h"
35 #include "adf_cfg_user.h"
36 #include "adf_heartbeat.h"
37 #include "adf_cfg_device.h"
38
39 #define DEVICE_NAME "qat_adf_ctl"
40
/* Serializes all control-device ioctl handling (see adf_ctl_ioctl()). */
static struct sx adf_ctl_lock;

/* Forward declaration for the cdevsw entry point defined below. */
static d_ioctl_t adf_ctl_ioctl;

/* NOTE(review): not referenced anywhere in this file; presumably consumed
 * elsewhere in the driver — confirm before removing or making it static. */
void *misc_counter;

/* Character-device switch for the control node: only ioctl is implemented. */
static struct cdevsw adf_ctl_cdevsw = {
	.d_version = D_VERSION,
	.d_ioctl = adf_ctl_ioctl,
	.d_name = DEVICE_NAME,
};

/* The /dev/qat_adf_ctl node created by adf_chr_drv_create(). */
static struct cdev *adf_ctl_dev;
54
/* Tear down the control character device created by adf_chr_drv_create(). */
static void adf_chr_drv_destroy(void)
{
	destroy_dev(adf_ctl_dev);
}
59
/*
 * List node pairing a tracked user-space address with its list linkage.
 * NOTE(review): no users visible in this file — presumably used by the
 * UIO cleanup code (adf_uio_cleanup.h); confirm against those callers.
 */
struct adf_user_addr_info {
	struct list_head list;	/* linkage into the owning list */
	void *user_addr;	/* user-space virtual address being tracked */
};
64
adf_chr_drv_create(void)65 static int adf_chr_drv_create(void)
66 {
67 adf_ctl_dev = make_dev(&adf_ctl_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
68 DEVICE_NAME);
69
70 if (!adf_ctl_dev) {
71 printf("QAT: failed to create device\n");
72 goto err_cdev_del;
73 }
74 return 0;
75 err_cdev_del:
76 return EFAULT;
77 }
78
/*
 * "Allocate" the ioctl control data.  On FreeBSD the ioctl argument has
 * already been copied into kernel space by the ioctl layer, so this
 * simply aliases that buffer.  Always returns 0.
 */
static int adf_ctl_alloc_resources(struct adf_user_cfg_ctl_data **ctl_data,
				   caddr_t arg)
{
	struct adf_user_cfg_ctl_data *data;

	data = (struct adf_user_cfg_ctl_data *)arg;
	*ctl_data = data;
	return 0;
}
85
adf_copy_keyval_to_user(struct adf_accel_dev * accel_dev,struct adf_user_cfg_ctl_data * ctl_data)86 static int adf_copy_keyval_to_user(struct adf_accel_dev *accel_dev,
87 struct adf_user_cfg_ctl_data *ctl_data)
88 {
89 struct adf_user_cfg_key_val key_val;
90 struct adf_user_cfg_section section;
91 char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
92 char *user_ptr;
93
94 if (copyin(ctl_data->config_section, §ion,
95 sizeof(struct adf_user_cfg_section))) {
96 device_printf(GET_DEV(accel_dev),
97 "failed to copy section info\n");
98 return EFAULT;
99 }
100
101 if (copyin(section.params, &key_val,
102 sizeof(struct adf_user_cfg_key_val))) {
103 device_printf(GET_DEV(accel_dev), "failed to copy key val\n");
104 return EFAULT;
105 }
106
107 user_ptr = ((char *)section.params) + ADF_CFG_MAX_KEY_LEN_IN_BYTES;
108
109 if (adf_cfg_get_param_value(
110 accel_dev, section.name, key_val.key, val)) {
111 return EFAULT;
112 }
113
114 if (copyout(val, user_ptr,
115 ADF_CFG_MAX_VAL_LEN_IN_BYTES)) {
116 device_printf(GET_DEV(accel_dev),
117 "failed to copy keyvalue to user!\n");
118 return EFAULT;
119 }
120
121 return 0;
122 }
123
/*
 * IOCTL_GET_NUM_DEVICES handler: store the number of registered accel
 * devices into the caller's buffer.  cmd is unused; always returns 0.
 */
static int adf_ctl_ioctl_get_num_devices(unsigned int cmd,
					 caddr_t arg)
{
	uint32_t *num_devices = (uint32_t *)arg;

	adf_devmgr_get_num_dev(num_devices);
	return 0;
}
131
/*
 * IOCTL_STATUS_ACCEL_DEV handler: fill in a status/identity snapshot for
 * the device selected by arg->accel_id (state, engine/accelerator
 * counts, class name, PCI location and SKU).
 *
 * Returns 0 on success, ENODEV when the id matches no registered device.
 */
static int adf_ctl_ioctl_get_status(unsigned int cmd,
				    caddr_t arg)
{
	struct adf_dev_status_info *dev_info;
	struct adf_accel_dev *accel_dev;
	struct adf_hw_device_data *hw_data;
	device_t pci_dev;

	dev_info = (struct adf_dev_status_info *)arg;
	accel_dev = adf_devmgr_get_dev_by_id(dev_info->accel_id);
	if (accel_dev == NULL)
		return ENODEV;

	hw_data = accel_dev->hw_device;
	pci_dev = accel_to_pci_dev(accel_dev);

	dev_info->state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN;
	dev_info->num_ae = hw_data->get_num_aes(hw_data);
	dev_info->num_accel = hw_data->get_num_accels(hw_data);
	dev_info->num_logical_accel = hw_data->num_logical_accel;
	dev_info->banks_per_accel =
	    hw_data->num_banks / hw_data->num_logical_accel;
	strlcpy(dev_info->name, hw_data->dev_class->name,
	    sizeof(dev_info->name));
	dev_info->instance_id = hw_data->instance_id;
	dev_info->type = hw_data->dev_class->type;

	dev_info->bus = pci_get_bus(pci_dev);
	dev_info->dev = pci_get_slot(pci_dev);
	dev_info->fun = pci_get_function(pci_dev);
	dev_info->domain = pci_get_domain(pci_dev);
	dev_info->pci_device_id = pci_get_device(pci_dev);

	dev_info->node_id = accel_dev->accel_pci_dev.node;
	dev_info->sku = accel_dev->accel_pci_dev.sku;

	/* No ARAM info means no device memory to report. */
	dev_info->device_mem_available = accel_dev->aram_info ?
	    accel_dev->aram_info->inter_buff_aram_region_size : 0;

	return 0;
}
171
/*
 * IOCTL_HEARTBEAT_ACCEL_DEV handler: query the heartbeat status of the
 * device named in arg->device_id and write it back into the same buffer.
 *
 * Returns 0 on success, ENODEV for an unknown device id, or EAGAIN when
 * the heartbeat query itself fails.
 */
static int adf_ctl_ioctl_heartbeat(unsigned int cmd,
				   caddr_t arg)
{
	struct adf_dev_heartbeat_status_ctl *hb_status;
	struct adf_accel_dev *accel_dev;

	hb_status = (struct adf_dev_heartbeat_status_ctl *)arg;
	accel_dev = adf_devmgr_get_dev_by_id(hb_status->device_id);
	if (accel_dev == NULL)
		return ENODEV;

	if (adf_heartbeat_status(accel_dev, &hb_status->status)) {
		device_printf(GET_DEV(accel_dev),
			      "failed to get heartbeat status\n");
		return EAGAIN;
	}

	return 0;
}
192
/*
 * IOCTL_GET_CFG_VAL handler: copy one configuration key/value pair back
 * to user space for the device named in the control data.
 *
 * Returns 0 on success; ENODEV if the device is unknown or the
 * key/value copy fails.
 */
static int adf_ctl_ioctl_dev_get_value(unsigned int cmd,
				       caddr_t arg)
{
	struct adf_user_cfg_ctl_data *ctl_data;
	struct adf_accel_dev *accel_dev;
	int ret;

	ret = adf_ctl_alloc_resources(&ctl_data, arg);
	if (ret)
		return ret;

	accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
	if (accel_dev == NULL) {
		printf("QAT: Device %d not found\n", ctl_data->device_id);
		return ENODEV;
	}

	/* Copy failures are reported to the caller as ENODEV. */
	if (adf_copy_keyval_to_user(accel_dev, ctl_data))
		return ENODEV;

	return 0;
}
219
220 static struct adf_uio_control_bundle
adf_ctl_ioctl_bundle(struct adf_user_reserve_ring reserve)221 *adf_ctl_ioctl_bundle(struct adf_user_reserve_ring reserve)
222 {
223 struct adf_accel_dev *accel_dev;
224 struct adf_uio_control_accel *accel;
225 struct adf_uio_control_bundle *bundle = NULL;
226 u8 num_rings_per_bank = 0;
227
228 accel_dev = adf_devmgr_get_dev_by_id(reserve.accel_id);
229 if (!accel_dev) {
230 pr_err("QAT: Failed to get accel_dev\n");
231 return NULL;
232 }
233 num_rings_per_bank = accel_dev->hw_device->num_rings_per_bank;
234
235 accel = accel_dev->accel;
236 if (!accel) {
237 pr_err("QAT: Failed to get accel\n");
238 return NULL;
239 }
240
241 if (reserve.bank_nr >= GET_MAX_BANKS(accel_dev)) {
242 pr_err("QAT: Invalid bank number %d\n", reserve.bank_nr);
243 return NULL;
244 }
245 if (reserve.ring_mask & ~((1 << num_rings_per_bank) - 1)) {
246 pr_err("QAT: Invalid ring mask %0X\n", reserve.ring_mask);
247 return NULL;
248 }
249 if (accel->num_ker_bundles > reserve.bank_nr) {
250 pr_err("QAT: Invalid user reserved bank\n");
251 return NULL;
252 }
253 bundle = &accel->bundle[reserve.bank_nr];
254
255 return bundle;
256 }
257
/*
 * IOCTL_RESERVE_RING handler: mark the requested rings of a bundle as
 * reserved by the calling process.
 *
 * Returns 0 on success, or EINVAL when the request does not resolve to
 * a valid bundle, the rings are already in use, or the calling process
 * has no instance entry on the bundle's list.
 *
 * Fix: return positive EINVAL instead of Linux-style -EINVAL — FreeBSD
 * d_ioctl handlers must return positive errno values, and every other
 * handler in this file (ENODEV/EFAULT/EAGAIN) already does.
 */
static int adf_ctl_ioctl_reserve_ring(caddr_t arg)
{
	struct adf_user_reserve_ring reserve = {0};
	struct adf_uio_control_bundle *bundle;
	struct adf_uio_instance_rings *instance_rings;
	int pid_entry_found = 0;

	reserve = *((struct adf_user_reserve_ring *)arg);

	bundle = adf_ctl_ioctl_bundle(reserve);
	if (!bundle) {
		pr_err("QAT: Failed to get bundle\n");
		return EINVAL;
	}

	mutex_lock(&bundle->lock);
	if (bundle->rings_used & reserve.ring_mask) {
		pr_err("QAT: Bundle %d, rings 0x%04X already reserved\n",
		       reserve.bank_nr,
		       reserve.ring_mask);
		mutex_unlock(&bundle->lock);
		return EINVAL;
	}

	/* Find the list entry for this process */
	mutex_lock(&bundle->list_lock);
	list_for_each_entry(instance_rings, &bundle->list, list) {
		if (instance_rings->user_pid == curproc->p_pid) {
			pid_entry_found = 1;
			break;
		}
	}
	mutex_unlock(&bundle->list_lock);

	if (!pid_entry_found) {
		pr_err("QAT: process %d not found\n", curproc->p_pid);
		mutex_unlock(&bundle->lock);
		return EINVAL;
	}

	/* Record the reservation both per-process and bundle-wide. */
	instance_rings->ring_mask |= reserve.ring_mask;
	bundle->rings_used |= reserve.ring_mask;
	mutex_unlock(&bundle->lock);

	return 0;
}
304
/*
 * IOCTL_RELEASE_RING handler: release rings previously reserved by the
 * calling process on a bundle.
 *
 * Returns 0 on success, or EINVAL when the request does not resolve to
 * a valid bundle, the process holds no reservation on it, or the mask
 * names rings the process did not reserve.
 *
 * Fix: return positive EINVAL instead of Linux-style -EINVAL — FreeBSD
 * d_ioctl handlers must return positive errno values, and every other
 * handler in this file (ENODEV/EFAULT/EAGAIN) already does.
 */
static int adf_ctl_ioctl_release_ring(caddr_t arg)
{
	struct adf_user_reserve_ring reserve;
	struct adf_uio_control_bundle *bundle;
	struct adf_uio_instance_rings *instance_rings;
	int pid_entry_found;

	reserve = *((struct adf_user_reserve_ring *)arg);

	bundle = adf_ctl_ioctl_bundle(reserve);
	if (!bundle) {
		pr_err("QAT: Failed to get bundle\n");
		return EINVAL;
	}

	/* Find the list entry for this process */
	pid_entry_found = 0;
	mutex_lock(&bundle->list_lock);
	list_for_each_entry(instance_rings, &bundle->list, list) {
		if (instance_rings->user_pid == curproc->p_pid) {
			pid_entry_found = 1;
			break;
		}
	}
	mutex_unlock(&bundle->list_lock);

	if (!pid_entry_found) {
		pr_err("QAT: No ring reservation found for PID %d\n",
		       curproc->p_pid);
		return EINVAL;
	}

	/* Only rings this process actually reserved may be released. */
	if ((instance_rings->ring_mask & reserve.ring_mask) !=
	    reserve.ring_mask) {
		pr_err("QAT: Attempt to release rings not reserved by this process\n");
		return EINVAL;
	}

	instance_rings->ring_mask &= ~reserve.ring_mask;
	mutex_lock(&bundle->lock);
	bundle->rings_used &= ~reserve.ring_mask;
	mutex_unlock(&bundle->lock);

	return 0;
}
350
/*
 * IOCTL_ENABLE_RING handler: set the requested rings in the bundle's
 * enabled mask and push the change to the ring arbiter.
 *
 * Returns 0 on success, or EINVAL when the request does not resolve to
 * a valid bundle.
 *
 * Fix: return positive EINVAL instead of Linux-style -EINVAL — FreeBSD
 * d_ioctl handlers must return positive errno values, and every other
 * handler in this file (ENODEV/EFAULT/EAGAIN) already does.
 */
static int adf_ctl_ioctl_enable_ring(caddr_t arg)
{
	struct adf_user_reserve_ring reserve;
	struct adf_uio_control_bundle *bundle;

	reserve = *((struct adf_user_reserve_ring *)arg);

	bundle = adf_ctl_ioctl_bundle(reserve);
	if (!bundle) {
		pr_err("QAT: Failed to get bundle\n");
		return EINVAL;
	}

	mutex_lock(&bundle->lock);
	bundle->rings_enabled |= reserve.ring_mask;
	adf_update_uio_ring_arb(bundle);
	mutex_unlock(&bundle->lock);

	return 0;
}
371
/*
 * IOCTL_DISABLE_RING handler: clear the requested rings from the
 * bundle's enabled mask and push the change to the ring arbiter.
 *
 * Returns 0 on success, or EINVAL when the request does not resolve to
 * a valid bundle.
 *
 * Fix: return positive EINVAL instead of Linux-style -EINVAL — FreeBSD
 * d_ioctl handlers must return positive errno values, and every other
 * handler in this file (ENODEV/EFAULT/EAGAIN) already does.
 */
static int adf_ctl_ioctl_disable_ring(caddr_t arg)
{
	struct adf_user_reserve_ring reserve;
	struct adf_uio_control_bundle *bundle;

	reserve = *((struct adf_user_reserve_ring *)arg);

	bundle = adf_ctl_ioctl_bundle(reserve);
	if (!bundle) {
		pr_err("QAT: Failed to get bundle\n");
		return EINVAL;
	}

	mutex_lock(&bundle->lock);
	bundle->rings_enabled &= ~reserve.ring_mask;
	adf_update_uio_ring_arb(bundle);
	mutex_unlock(&bundle->lock);

	return 0;
}
392
/*
 * Top-level ioctl dispatcher for /dev/qat_adf_ctl.
 *
 * Callers without PRIV_DRIVER privilege are limited to the commands in
 * unrestricted_cmds[] and get EACCES otherwise.  Note the list names
 * every command the switch below dispatches, so for unprivileged
 * callers the gate only rejects unrecognized commands early.
 *
 * Every command requires a non-NULL argument buffer (EFAULT otherwise),
 * and all handlers run serialized under the global adf_ctl_lock, taken
 * interruptibly (EINTR if a signal arrives first).  Unknown commands
 * yield ENOTTY; otherwise the handler's own result is returned.
 */
static int adf_ctl_ioctl(struct cdev *dev,
			 u_long cmd,
			 caddr_t arg,
			 int fflag,
			 struct thread *td)
{
	int ret = 0;
	bool allowed = false;
	int i;
	/* Commands permitted without PRIV_DRIVER. */
	static const unsigned int unrestricted_cmds[] = {
		IOCTL_GET_NUM_DEVICES,	  IOCTL_STATUS_ACCEL_DEV,
		IOCTL_HEARTBEAT_ACCEL_DEV, IOCTL_GET_CFG_VAL,
		IOCTL_RESERVE_RING,	  IOCTL_RELEASE_RING,
		IOCTL_ENABLE_RING,	  IOCTL_DISABLE_RING,
	};

	/* priv_check() != 0 means the caller lacks driver privilege. */
	if (priv_check(curthread, PRIV_DRIVER)) {
		for (i = 0; i < ARRAY_SIZE(unrestricted_cmds); i++) {
			if (cmd == unrestricted_cmds[i]) {
				allowed = true;
				break;
			}
		}
		if (!allowed)
			return EACCES;
	}

	/* All commands have an argument */
	if (!arg)
		return EFAULT;

	if (sx_xlock_sig(&adf_ctl_lock))
		return EINTR;

	switch (cmd) {
	case IOCTL_GET_NUM_DEVICES:
		ret = adf_ctl_ioctl_get_num_devices(cmd, arg);
		break;
	case IOCTL_STATUS_ACCEL_DEV:
		ret = adf_ctl_ioctl_get_status(cmd, arg);
		break;
	case IOCTL_GET_CFG_VAL:
		ret = adf_ctl_ioctl_dev_get_value(cmd, arg);
		break;
	case IOCTL_RESERVE_RING:
		ret = adf_ctl_ioctl_reserve_ring(arg);
		break;
	case IOCTL_RELEASE_RING:
		ret = adf_ctl_ioctl_release_ring(arg);
		break;
	case IOCTL_ENABLE_RING:
		ret = adf_ctl_ioctl_enable_ring(arg);
		break;
	case IOCTL_DISABLE_RING:
		ret = adf_ctl_ioctl_disable_ring(arg);
		break;
	case IOCTL_HEARTBEAT_ACCEL_DEV:
		ret = adf_ctl_ioctl_heartbeat(cmd, arg);
		break;
	default:
		printf("QAT: Invalid ioctl\n");
		ret = ENOTTY;
		break;
	}
	sx_xunlock(&adf_ctl_lock);
	return ret;
}
460
461 int
adf_register_ctl_device_driver(void)462 adf_register_ctl_device_driver(void)
463 {
464 sx_init(&adf_ctl_lock, "adf ctl");
465
466 if (adf_chr_drv_create())
467 goto err_chr_dev;
468
469 adf_state_init();
470 if (adf_processes_dev_register() != 0)
471 goto err_processes_dev_register;
472 return 0;
473
474 err_processes_dev_register:
475 adf_chr_drv_destroy();
476 err_chr_dev:
477 sx_destroy(&adf_ctl_lock);
478 return EFAULT;
479 }
480
/*
 * Module-unload entry point: undo the work of
 * adf_register_ctl_device_driver() in reverse order, plus VF-map
 * cleanup, then destroy the control lock.
 */
void
adf_unregister_ctl_device_driver(void)
{
	adf_processes_dev_unregister();
	adf_state_destroy();
	adf_chr_drv_destroy();
	adf_clean_vf_map(false);
	sx_destroy(&adf_ctl_lock);
}
490