xref: /freebsd/sys/dev/qat/qat_common/adf_ctl_drv.c (revision d0b2dbfa0ecf2bbc9709efc5e20baf8e4b44bbbf)
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_accel_devices.h"
#include "icp_qat_uclo.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_init_admin.h"
#include "adf_cfg_strings.h"
#include "adf_uio_control.h"
#include "adf_uio_cleanup.h"
#include "adf_uio.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sx.h>
#include <sys/malloc.h>
#include <machine/atomic.h>
#include <dev/pci/pcivar.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/types.h>
#include <sys/priv.h>
#include <linux/list.h>
#include "adf_cfg_common.h"
#include "adf_cfg_user.h"
#include "adf_heartbeat.h"
#include "adf_cfg_device.h"

#define DEVICE_NAME "qat_adf_ctl"

static struct sx adf_ctl_lock;

static d_ioctl_t adf_ctl_ioctl;

void *misc_counter;

static struct cdevsw adf_ctl_cdevsw = {
	.d_version = D_VERSION,
	.d_ioctl = adf_ctl_ioctl,
	.d_name = DEVICE_NAME,
};

static struct cdev *adf_ctl_dev;

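/* Destroy the qat_adf_ctl control device node. */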
static void adf_chr_drv_destroy(void)
{
	destroy_dev(adf_ctl_dev);
}

struct adf_user_addr_info {
	struct list_head list;
	void *user_addr;
};

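/*
 * Create the /dev/qat_adf_ctl node (root-only, mode 0600) that exposes the
 * QAT control ioctl interface.
 */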
static int adf_chr_drv_create(void)
{
	adf_ctl_dev = make_dev(&adf_ctl_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
			       DEVICE_NAME);

	if (!adf_ctl_dev) {
		printf("QAT: failed to create device\n");
		goto err_cdev_del;
	}
	return 0;
err_cdev_del:
	return EFAULT;
}

static int adf_ctl_alloc_resources(struct adf_user_cfg_ctl_data **ctl_data,
				   caddr_t arg)
{
	*ctl_data = (struct adf_user_cfg_ctl_data *)arg;
	return 0;
}

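/*
 * Copy the requested configuration section and key from user space, look the
 * key up with adf_cfg_get_param_value() and copy the resulting value back to
 * the user buffer that follows the key in struct adf_user_cfg_key_val.
 */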
static int adf_copy_keyval_to_user(struct adf_accel_dev *accel_dev,
				   struct adf_user_cfg_ctl_data *ctl_data)
{
	struct adf_user_cfg_key_val key_val;
	struct adf_user_cfg_section section;
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
	char *user_ptr;

	if (copyin(ctl_data->config_section, &section,
		   sizeof(struct adf_user_cfg_section))) {
		device_printf(GET_DEV(accel_dev),
			      "failed to copy section info\n");
		return EFAULT;
	}

	if (copyin(section.params, &key_val,
		   sizeof(struct adf_user_cfg_key_val))) {
		device_printf(GET_DEV(accel_dev), "failed to copy key val\n");
		return EFAULT;
	}

	user_ptr = ((char *)section.params) + ADF_CFG_MAX_KEY_LEN_IN_BYTES;

	if (adf_cfg_get_param_value(
		accel_dev, section.name, key_val.key, val)) {
		return EFAULT;
	}

	if (copyout(val, user_ptr,
		    ADF_CFG_MAX_VAL_LEN_IN_BYTES)) {
		device_printf(GET_DEV(accel_dev),
			      "failed to copy keyvalue to user!\n");
		return EFAULT;
	}

	return 0;
}

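/* IOCTL_GET_NUM_DEVICES: report the number of registered QAT devices. */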
static int adf_ctl_ioctl_get_num_devices(unsigned int cmd,
					 caddr_t arg)
{
	adf_devmgr_get_num_dev((uint32_t *)arg);

	return 0;
}

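/*
 * IOCTL_STATUS_ACCEL_DEV: report state, topology, SKU and PCI identity of the
 * device selected by dev_info->accel_id.
 */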
static int adf_ctl_ioctl_get_status(unsigned int cmd,
				    caddr_t arg)
{
	struct adf_hw_device_data *hw_data;
	struct adf_dev_status_info *dev_info;
	struct adf_accel_dev *accel_dev;

	dev_info = (struct adf_dev_status_info *)arg;

	accel_dev = adf_devmgr_get_dev_by_id(dev_info->accel_id);
	if (!accel_dev)
		return ENODEV;

	hw_data = accel_dev->hw_device;
	dev_info->state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN;
	dev_info->num_ae = hw_data->get_num_aes(hw_data);
	dev_info->num_accel = hw_data->get_num_accels(hw_data);
	dev_info->num_logical_accel = hw_data->num_logical_accel;
	dev_info->banks_per_accel =
	    hw_data->num_banks / hw_data->num_logical_accel;
	strlcpy(dev_info->name, hw_data->dev_class->name,
		sizeof(dev_info->name));
	dev_info->instance_id = hw_data->instance_id;
	dev_info->type = hw_data->dev_class->type;
	dev_info->bus = pci_get_bus(accel_to_pci_dev(accel_dev));
	dev_info->dev = pci_get_slot(accel_to_pci_dev(accel_dev));
	dev_info->fun = pci_get_function(accel_to_pci_dev(accel_dev));
	dev_info->domain = pci_get_domain(accel_to_pci_dev(accel_dev));

	dev_info->pci_device_id = pci_get_device(accel_to_pci_dev(accel_dev));

	dev_info->node_id = accel_dev->accel_pci_dev.node;
	dev_info->sku = accel_dev->accel_pci_dev.sku;

	dev_info->device_mem_available = accel_dev->aram_info ?
		accel_dev->aram_info->inter_buff_aram_region_size : 0;

	return 0;
}

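/*
 * IOCTL_HEARTBEAT_ACCEL_DEV: query the heartbeat status of the device
 * selected by hb_status->device_id.
 */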
static int adf_ctl_ioctl_heartbeat(unsigned int cmd,
				   caddr_t arg)
{
	int ret = 0;
	struct adf_accel_dev *accel_dev;
	struct adf_dev_heartbeat_status_ctl *hb_status;

	hb_status = (struct adf_dev_heartbeat_status_ctl *)arg;

	accel_dev = adf_devmgr_get_dev_by_id(hb_status->device_id);
	if (!accel_dev)
		return ENODEV;

	if (adf_heartbeat_status(accel_dev, &hb_status->status)) {
		device_printf(GET_DEV(accel_dev),
			      "failed to get heartbeat status\n");
		return EAGAIN;
	}
	return ret;
}

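/*
 * IOCTL_GET_CFG_VAL: look up a configuration key on the selected device and
 * copy the value back to user space.
 */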
static int adf_ctl_ioctl_dev_get_value(unsigned int cmd,
				       caddr_t arg)
{
	int ret = 0;
	struct adf_user_cfg_ctl_data *ctl_data;
	struct adf_accel_dev *accel_dev;

	ret = adf_ctl_alloc_resources(&ctl_data, arg);
	if (ret)
		return ret;

	accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
	if (!accel_dev) {
		printf("QAT: Device %d not found\n", ctl_data->device_id);
		ret = ENODEV;
		goto out;
	}

	ret = adf_copy_keyval_to_user(accel_dev, ctl_data);
	if (ret) {
		ret = ENODEV;
		goto out;
	}
out:
	return ret;
}

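/*
 * Validate a ring reservation request (device id, bank number, ring mask)
 * and return the matching UIO control bundle, or NULL on error.
 */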
static struct adf_uio_control_bundle
	*adf_ctl_ioctl_bundle(struct adf_user_reserve_ring reserve)
{
	struct adf_accel_dev *accel_dev;
	struct adf_uio_control_accel *accel;
	struct adf_uio_control_bundle *bundle = NULL;
	u8 num_rings_per_bank = 0;

	accel_dev = adf_devmgr_get_dev_by_id(reserve.accel_id);
	if (!accel_dev) {
		pr_err("QAT: Failed to get accel_dev\n");
		return NULL;
	}
	num_rings_per_bank = accel_dev->hw_device->num_rings_per_bank;

	accel = accel_dev->accel;
	if (!accel) {
		pr_err("QAT: Failed to get accel\n");
		return NULL;
	}

	if (reserve.bank_nr >= GET_MAX_BANKS(accel_dev)) {
		pr_err("QAT: Invalid bank number %d\n", reserve.bank_nr);
		return NULL;
	}
	if (reserve.ring_mask & ~((1 << num_rings_per_bank) - 1)) {
		pr_err("QAT: Invalid ring mask %0X\n", reserve.ring_mask);
		return NULL;
	}
	if (accel->num_ker_bundles > reserve.bank_nr) {
		pr_err("QAT: Invalid user reserved bank\n");
		return NULL;
	}
	bundle = &accel->bundle[reserve.bank_nr];

	return bundle;
}

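/*
 * IOCTL_RESERVE_RING: reserve the requested rings within a bank on behalf of
 * the calling process.
 */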
static int adf_ctl_ioctl_reserve_ring(caddr_t arg)
{
	struct adf_user_reserve_ring reserve = {0};
	struct adf_uio_control_bundle *bundle;
	struct adf_uio_instance_rings *instance_rings;
	int pid_entry_found = 0;

	reserve = *((struct adf_user_reserve_ring *)arg);

	bundle = adf_ctl_ioctl_bundle(reserve);
	if (!bundle) {
		pr_err("QAT: Failed to get bundle\n");
		return EINVAL;
	}

	mutex_lock(&bundle->lock);
	if (bundle->rings_used & reserve.ring_mask) {
		pr_err("QAT: Bundle %d, rings 0x%04X already reserved\n",
		       reserve.bank_nr,
		       reserve.ring_mask);
		mutex_unlock(&bundle->lock);
		return EINVAL;
	}
	mutex_unlock(&bundle->lock);

	/* Find the list entry for this process */
	mutex_lock(&bundle->list_lock);
	list_for_each_entry(instance_rings, &bundle->list, list) {
		if (instance_rings->user_pid == curproc->p_pid) {
			pid_entry_found = 1;
			break;
		}
	}
	mutex_unlock(&bundle->list_lock);

	if (!pid_entry_found) {
		pr_err("QAT: process %d not found\n", curproc->p_pid);
		return EINVAL;
	}

	instance_rings->ring_mask |= reserve.ring_mask;
	mutex_lock(&bundle->lock);
	bundle->rings_used |= reserve.ring_mask;
	mutex_unlock(&bundle->lock);

	return 0;
}

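/*
 * IOCTL_RELEASE_RING: release rings previously reserved by the calling
 * process.
 */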
static int adf_ctl_ioctl_release_ring(caddr_t arg)
{
	struct adf_user_reserve_ring reserve;
	struct adf_uio_control_bundle *bundle;
	struct adf_uio_instance_rings *instance_rings;
	int pid_entry_found;

	reserve = *((struct adf_user_reserve_ring *)arg);

	bundle = adf_ctl_ioctl_bundle(reserve);
	if (!bundle) {
		pr_err("QAT: Failed to get bundle\n");
		return EINVAL;
	}

	/* Find the list entry for this process */
	pid_entry_found = 0;
	mutex_lock(&bundle->list_lock);
	list_for_each_entry(instance_rings, &bundle->list, list) {
		if (instance_rings->user_pid == curproc->p_pid) {
			pid_entry_found = 1;
			break;
		}
	}
	mutex_unlock(&bundle->list_lock);

	if (!pid_entry_found) {
		pr_err("QAT: No ring reservation found for PID %d\n",
		       curproc->p_pid);
		return EINVAL;
	}

	if ((instance_rings->ring_mask & reserve.ring_mask) !=
	    reserve.ring_mask) {
		pr_err("QAT: Attempt to release rings not reserved by this process\n");
		return EINVAL;
	}

	instance_rings->ring_mask &= ~reserve.ring_mask;
	mutex_lock(&bundle->lock);
	bundle->rings_used &= ~reserve.ring_mask;
	mutex_unlock(&bundle->lock);

	return 0;
}

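/*
 * IOCTL_ENABLE_RING: enable the requested rings and update the ring
 * arbitration settings.
 */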
static int adf_ctl_ioctl_enable_ring(caddr_t arg)
{
	struct adf_user_reserve_ring reserve;
	struct adf_uio_control_bundle *bundle;

	reserve = *((struct adf_user_reserve_ring *)arg);

	bundle = adf_ctl_ioctl_bundle(reserve);
	if (!bundle) {
		pr_err("QAT: Failed to get bundle\n");
		return EINVAL;
	}

	mutex_lock(&bundle->lock);
	bundle->rings_enabled |= reserve.ring_mask;
	adf_update_uio_ring_arb(bundle);
	mutex_unlock(&bundle->lock);

	return 0;
}

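/*
 * IOCTL_DISABLE_RING: disable the requested rings and update the ring
 * arbitration settings.
 */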
static int adf_ctl_ioctl_disable_ring(caddr_t arg)
{
	struct adf_user_reserve_ring reserve;
	struct adf_uio_control_bundle *bundle;

	reserve = *((struct adf_user_reserve_ring *)arg);

	bundle = adf_ctl_ioctl_bundle(reserve);
	if (!bundle) {
		pr_err("QAT: Failed to get bundle\n");
		return EINVAL;
	}

	mutex_lock(&bundle->lock);
	bundle->rings_enabled &= ~reserve.ring_mask;
	adf_update_uio_ring_arb(bundle);
	mutex_unlock(&bundle->lock);

	return 0;
}

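/*
 * Main ioctl entry point for the qat_adf_ctl device.  Callers without
 * PRIV_DRIVER privilege are limited to the commands in unrestricted_cmds;
 * all commands are serialized by adf_ctl_lock.
 */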
static int adf_ctl_ioctl(struct cdev *dev,
			 u_long cmd,
			 caddr_t arg,
			 int fflag,
			 struct thread *td)
{
	int ret = 0;
	bool allowed = false;
	int i;
	static const unsigned int unrestricted_cmds[] = {
		IOCTL_GET_NUM_DEVICES,     IOCTL_STATUS_ACCEL_DEV,
		IOCTL_HEARTBEAT_ACCEL_DEV, IOCTL_GET_CFG_VAL,
		IOCTL_RESERVE_RING,        IOCTL_RELEASE_RING,
		IOCTL_ENABLE_RING,         IOCTL_DISABLE_RING,
	};

	if (priv_check(curthread, PRIV_DRIVER)) {
		for (i = 0; i < ARRAY_SIZE(unrestricted_cmds); i++) {
			if (cmd == unrestricted_cmds[i]) {
				allowed = true;
				break;
			}
		}
		if (!allowed)
			return EACCES;
	}

	/* All commands have an argument */
	if (!arg)
		return EFAULT;

	if (sx_xlock_sig(&adf_ctl_lock))
		return EINTR;

	switch (cmd) {
	case IOCTL_GET_NUM_DEVICES:
		ret = adf_ctl_ioctl_get_num_devices(cmd, arg);
		break;
	case IOCTL_STATUS_ACCEL_DEV:
		ret = adf_ctl_ioctl_get_status(cmd, arg);
		break;
	case IOCTL_GET_CFG_VAL:
		ret = adf_ctl_ioctl_dev_get_value(cmd, arg);
		break;
	case IOCTL_RESERVE_RING:
		ret = adf_ctl_ioctl_reserve_ring(arg);
		break;
	case IOCTL_RELEASE_RING:
		ret = adf_ctl_ioctl_release_ring(arg);
		break;
	case IOCTL_ENABLE_RING:
		ret = adf_ctl_ioctl_enable_ring(arg);
		break;
	case IOCTL_DISABLE_RING:
		ret = adf_ctl_ioctl_disable_ring(arg);
		break;
	case IOCTL_HEARTBEAT_ACCEL_DEV:
		ret = adf_ctl_ioctl_heartbeat(cmd, arg);
		break;
	default:
		printf("QAT: Invalid ioctl\n");
		ret = ENOTTY;
		break;
	}
	sx_xunlock(&adf_ctl_lock);
	return ret;
}

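/*
 * Illustrative user-space sketch (not part of the driver); it assumes the
 * IOCTL_* request codes and argument structures declared in the QAT user
 * configuration headers (e.g. adf_cfg_common.h) are available to the caller:
 *
 *	int fd = open("/dev/qat_adf_ctl", O_RDWR);
 *	uint32_t num_devices = 0;
 *
 *	if (fd >= 0 && ioctl(fd, IOCTL_GET_NUM_DEVICES, &num_devices) == 0)
 *		printf("%u QAT device(s) present\n", num_devices);
 *	close(fd);
 */

/*
 * Module registration: initialize the control lock, create the control device
 * node, set up QAT state handling and register the processes device.
 */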
int
adf_register_ctl_device_driver(void)
{
	sx_init(&adf_ctl_lock, "adf ctl");

	if (adf_chr_drv_create())
		goto err_chr_dev;

	adf_state_init();
	if (adf_processes_dev_register() != 0)
		goto err_processes_dev_register;
	return 0;

err_processes_dev_register:
	adf_chr_drv_destroy();
err_chr_dev:
	sx_destroy(&adf_ctl_lock);
	return EFAULT;
}

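/*
 * Module unregistration: tear down the processes and state devices, destroy
 * the control device node, clean the VF map and release the control lock.
 */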
void
adf_unregister_ctl_device_driver(void)
{
	adf_processes_dev_unregister();
	adf_state_destroy();
	adf_chr_drv_destroy();
	adf_clean_vf_map(false);
	sx_destroy(&adf_ctl_lock);
}