xref: /freebsd/sys/dev/qat/qat_common/adf_ctl_drv.c (revision 7ef62cebc2f965b0f640263e179276928885e33d)
/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright(c) 2007-2022 Intel Corporation */
/* $FreeBSD$ */
#include "qat_freebsd.h"
#include "adf_cfg.h"
#include "adf_common_drv.h"
#include "adf_accel_devices.h"
#include "icp_qat_uclo.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_init_admin.h"
#include "adf_cfg_strings.h"
#include "adf_uio_control.h"
#include "adf_uio_cleanup.h"
#include "adf_uio.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/sx.h>
#include <sys/malloc.h>
#include <machine/atomic.h>
#include <dev/pci/pcivar.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/types.h>
#include <sys/priv.h>
#include <linux/list.h>
#include "adf_cfg_common.h"
#include "adf_cfg_user.h"
#include "adf_heartbeat.h"
#include "adf_cfg_device.h"

#define DEVICE_NAME "qat_adf_ctl"

static struct sx adf_ctl_lock;

static d_ioctl_t adf_ctl_ioctl;

void *misc_counter;

static struct cdevsw adf_ctl_cdevsw = {
	.d_version = D_VERSION,
	.d_ioctl = adf_ctl_ioctl,
	.d_name = DEVICE_NAME,
};

static struct cdev *adf_ctl_dev;

static void adf_chr_drv_destroy(void)
{
	destroy_dev(adf_ctl_dev);
}

struct adf_user_addr_info {
	struct list_head list;
	void *user_addr;
};

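/*
 * Create the qat_adf_ctl character device node, owned by root:wheel with
 * mode 0600.  Returns 0 on success or EFAULT if the node could not be
 * created.
 */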
static int adf_chr_drv_create(void)
{
	adf_ctl_dev = make_dev(&adf_ctl_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600,
			       DEVICE_NAME);

	if (!adf_ctl_dev) {
		printf("QAT: failed to create device\n");
		goto err_cdev_del;
	}
	return 0;
err_cdev_del:
	return EFAULT;
}

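/*
 * On FreeBSD the ioctl layer has already copied the user argument into a
 * kernel buffer (the caddr_t arg), so "allocating" the control data is
 * just a cast of that buffer; no copyin() or allocation is needed here.
 */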
static int adf_ctl_alloc_resources(struct adf_user_cfg_ctl_data **ctl_data,
				   caddr_t arg)
{
	*ctl_data = (struct adf_user_cfg_ctl_data *)arg;
	return 0;
}

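/*
 * Copy one configuration value out to user space: copy in the section
 * descriptor and the key/value request, look the key up with
 * adf_cfg_get_param_value(), and write the value back into the user
 * buffer directly after the key field.
 */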
static int adf_copy_keyval_to_user(struct adf_accel_dev *accel_dev,
				   struct adf_user_cfg_ctl_data *ctl_data)
{
	struct adf_user_cfg_key_val key_val;
	struct adf_user_cfg_section section;
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0};
	char *user_ptr;

	if (copyin(ctl_data->config_section, &section,
		   sizeof(struct adf_user_cfg_section))) {
		device_printf(GET_DEV(accel_dev),
			      "failed to copy section info\n");
		return EFAULT;
	}

	if (copyin(section.params, &key_val,
		   sizeof(struct adf_user_cfg_key_val))) {
		device_printf(GET_DEV(accel_dev), "failed to copy key val\n");
		return EFAULT;
	}

	user_ptr = ((char *)section.params) + ADF_CFG_MAX_KEY_LEN_IN_BYTES;

	if (adf_cfg_get_param_value(
		accel_dev, section.name, key_val.key, val)) {
		return EFAULT;
	}

	if (copyout(val, user_ptr, ADF_CFG_MAX_VAL_LEN_IN_BYTES)) {
		device_printf(GET_DEV(accel_dev),
			      "failed to copy keyvalue to user!\n");
		return EFAULT;
	}

	return 0;
}

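/*
 * IOCTL_GET_NUM_DEVICES handler: report the number of registered QAT
 * devices.
 */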
static int adf_ctl_ioctl_get_num_devices(unsigned int cmd,
					 caddr_t arg)
{
	adf_devmgr_get_num_dev((uint32_t *)arg);

	return 0;
}

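/*
 * IOCTL_STATUS_ACCEL_DEV handler: fill in an adf_dev_status_info record
 * for the requested device with its state, engine and accelerator counts,
 * PCI location, SKU and available device memory.
 */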
static int adf_ctl_ioctl_get_status(unsigned int cmd,
				    caddr_t arg)
{
	struct adf_hw_device_data *hw_data;
	struct adf_dev_status_info *dev_info;
	struct adf_accel_dev *accel_dev;

	dev_info = (struct adf_dev_status_info *)arg;

	accel_dev = adf_devmgr_get_dev_by_id(dev_info->accel_id);
	if (!accel_dev)
		return ENODEV;

	hw_data = accel_dev->hw_device;
	dev_info->state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN;
	dev_info->num_ae = hw_data->get_num_aes(hw_data);
	dev_info->num_accel = hw_data->get_num_accels(hw_data);
	dev_info->num_logical_accel = hw_data->num_logical_accel;
	dev_info->banks_per_accel =
	    hw_data->num_banks / hw_data->num_logical_accel;
	strlcpy(dev_info->name, hw_data->dev_class->name,
		sizeof(dev_info->name));
	dev_info->instance_id = hw_data->instance_id;
	dev_info->type = hw_data->dev_class->type;
	dev_info->bus = pci_get_bus(accel_to_pci_dev(accel_dev));
	dev_info->dev = pci_get_slot(accel_to_pci_dev(accel_dev));
	dev_info->fun = pci_get_function(accel_to_pci_dev(accel_dev));
	dev_info->domain = pci_get_domain(accel_to_pci_dev(accel_dev));

	dev_info->pci_device_id = pci_get_device(accel_to_pci_dev(accel_dev));

	dev_info->node_id = accel_dev->accel_pci_dev.node;
	dev_info->sku = accel_dev->accel_pci_dev.sku;

	dev_info->device_mem_available = accel_dev->aram_info ?
		accel_dev->aram_info->inter_buff_aram_region_size : 0;

	return 0;
}

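/*
 * IOCTL_HEARTBEAT_ACCEL_DEV handler: query the heartbeat status of the
 * requested device and return it in hb_status->status.
 */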
static int adf_ctl_ioctl_heartbeat(unsigned int cmd,
				   caddr_t arg)
{
	int ret = 0;
	struct adf_accel_dev *accel_dev;
	struct adf_dev_heartbeat_status_ctl *hb_status;

	hb_status = (struct adf_dev_heartbeat_status_ctl *)arg;

	accel_dev = adf_devmgr_get_dev_by_id(hb_status->device_id);
	if (!accel_dev)
		return ENODEV;

	if (adf_heartbeat_status(accel_dev, &hb_status->status)) {
		device_printf(GET_DEV(accel_dev),
			      "failed to get heartbeat status\n");
		return EAGAIN;
	}
	return ret;
}

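/*
 * IOCTL_GET_CFG_VAL handler: look up the device named in the request and
 * copy the requested configuration key/value back to user space.
 */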
static int adf_ctl_ioctl_dev_get_value(unsigned int cmd,
				       caddr_t arg)
{
	int ret = 0;
	struct adf_user_cfg_ctl_data *ctl_data;
	struct adf_accel_dev *accel_dev;

	ret = adf_ctl_alloc_resources(&ctl_data, arg);
	if (ret)
		return ret;

	accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
	if (!accel_dev) {
		printf("QAT: Device %d not found\n", ctl_data->device_id);
		ret = ENODEV;
		goto out;
	}

	ret = adf_copy_keyval_to_user(accel_dev, ctl_data);
	if (ret) {
		ret = ENODEV;
		goto out;
	}
out:
	return ret;
}

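/*
 * Resolve a ring reservation request to the bundle (ring bank) it refers
 * to.  Validates the accelerator id, the bank number and the ring mask,
 * and rejects banks reserved for kernel instances.  Returns NULL on any
 * validation failure.
 */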
static struct adf_uio_control_bundle
	*adf_ctl_ioctl_bundle(struct adf_user_reserve_ring reserve)
{
	struct adf_accel_dev *accel_dev;
	struct adf_uio_control_accel *accel;
	struct adf_uio_control_bundle *bundle = NULL;
	u8 num_rings_per_bank = 0;

	accel_dev = adf_devmgr_get_dev_by_id(reserve.accel_id);
	if (!accel_dev) {
		pr_err("QAT: Failed to get accel_dev\n");
		return NULL;
	}
	num_rings_per_bank = accel_dev->hw_device->num_rings_per_bank;

	accel = accel_dev->accel;
	if (!accel) {
		pr_err("QAT: Failed to get accel\n");
		return NULL;
	}

	if (reserve.bank_nr >= GET_MAX_BANKS(accel_dev)) {
		pr_err("QAT: Invalid bank number %d\n", reserve.bank_nr);
		return NULL;
	}
	if (reserve.ring_mask & ~((1 << num_rings_per_bank) - 1)) {
		pr_err("QAT: Invalid ring mask %0X\n", reserve.ring_mask);
		return NULL;
	}
	if (accel->num_ker_bundles > reserve.bank_nr) {
		pr_err("QAT: Invalid user reserved bank\n");
		return NULL;
	}
	bundle = &accel->bundle[reserve.bank_nr];

	return bundle;
}

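/*
 * IOCTL_RESERVE_RING handler: reserve the requested rings of a bundle for
 * the calling process.  Fails if any of the rings are already reserved or
 * if the process has no instance entry on the bundle's list.
 */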
static int adf_ctl_ioctl_reserve_ring(caddr_t arg)
{
	struct adf_user_reserve_ring reserve = {0};
	struct adf_uio_control_bundle *bundle;
	struct adf_uio_instance_rings *instance_rings;
	int pid_entry_found = 0;

	reserve = *((struct adf_user_reserve_ring *)arg);

	bundle = adf_ctl_ioctl_bundle(reserve);
	if (!bundle) {
		pr_err("QAT: Failed to get bundle\n");
		return EINVAL;
	}

	mutex_lock(&bundle->lock);
	if (bundle->rings_used & reserve.ring_mask) {
		pr_err("QAT: Bundle %d, rings 0x%04X already reserved\n",
		       reserve.bank_nr,
		       reserve.ring_mask);
		mutex_unlock(&bundle->lock);
		return EINVAL;
	}
	mutex_unlock(&bundle->lock);

	/* Find the list entry for this process */
	mutex_lock(&bundle->list_lock);
	list_for_each_entry(instance_rings, &bundle->list, list) {
		if (instance_rings->user_pid == curproc->p_pid) {
			pid_entry_found = 1;
			break;
		}
	}
	mutex_unlock(&bundle->list_lock);

	if (!pid_entry_found) {
		pr_err("QAT: process %d not found\n", curproc->p_pid);
		return EINVAL;
	}

	instance_rings->ring_mask |= reserve.ring_mask;
	mutex_lock(&bundle->lock);
	bundle->rings_used |= reserve.ring_mask;
	mutex_unlock(&bundle->lock);

	return 0;
}

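/*
 * IOCTL_RELEASE_RING handler: release rings previously reserved by the
 * calling process.  Fails if the process has no entry on the bundle's
 * list or tries to release rings it did not reserve.
 */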
static int adf_ctl_ioctl_release_ring(caddr_t arg)
{
	struct adf_user_reserve_ring reserve;
	struct adf_uio_control_bundle *bundle;
	struct adf_uio_instance_rings *instance_rings;
	int pid_entry_found;

	reserve = *((struct adf_user_reserve_ring *)arg);

	bundle = adf_ctl_ioctl_bundle(reserve);
	if (!bundle) {
		pr_err("QAT: Failed to get bundle\n");
		return EINVAL;
	}

	/* Find the list entry for this process */
	pid_entry_found = 0;
	mutex_lock(&bundle->list_lock);
	list_for_each_entry(instance_rings, &bundle->list, list) {
		if (instance_rings->user_pid == curproc->p_pid) {
			pid_entry_found = 1;
			break;
		}
	}
	mutex_unlock(&bundle->list_lock);

	if (!pid_entry_found) {
		pr_err("QAT: No ring reservation found for PID %d\n",
		       curproc->p_pid);
		return EINVAL;
	}

	if ((instance_rings->ring_mask & reserve.ring_mask) !=
	    reserve.ring_mask) {
		pr_err("QAT: Attempt to release rings not reserved by this process\n");
		return EINVAL;
	}

	instance_rings->ring_mask &= ~reserve.ring_mask;
	mutex_lock(&bundle->lock);
	bundle->rings_used &= ~reserve.ring_mask;
	mutex_unlock(&bundle->lock);

	return 0;
}

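/*
 * IOCTL_ENABLE_RING handler: mark the requested rings as enabled and
 * update the ring arbiter for the bundle.
 */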
static int adf_ctl_ioctl_enable_ring(caddr_t arg)
{
	struct adf_user_reserve_ring reserve;
	struct adf_uio_control_bundle *bundle;

	reserve = *((struct adf_user_reserve_ring *)arg);

	bundle = adf_ctl_ioctl_bundle(reserve);
	if (!bundle) {
		pr_err("QAT: Failed to get bundle\n");
		return EINVAL;
	}

	mutex_lock(&bundle->lock);
	bundle->rings_enabled |= reserve.ring_mask;
	adf_update_uio_ring_arb(bundle);
	mutex_unlock(&bundle->lock);

	return 0;
}

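/*
 * IOCTL_DISABLE_RING handler: clear the requested rings from the enabled
 * mask and update the ring arbiter for the bundle.
 */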
static int adf_ctl_ioctl_disable_ring(caddr_t arg)
{
	struct adf_user_reserve_ring reserve;
	struct adf_uio_control_bundle *bundle;

	reserve = *((struct adf_user_reserve_ring *)arg);

	bundle = adf_ctl_ioctl_bundle(reserve);
	if (!bundle) {
		pr_err("QAT: Failed to get bundle\n");
		return EINVAL;
	}

	mutex_lock(&bundle->lock);
	bundle->rings_enabled &= ~reserve.ring_mask;
	adf_update_uio_ring_arb(bundle);
	mutex_unlock(&bundle->lock);

	return 0;
}

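/*
 * Main ioctl entry point for the qat_adf_ctl device.  Callers without
 * PRIV_DRIVER are limited to the commands in unrestricted_cmds; all
 * commands are serialized by adf_ctl_lock.
 *
 * Minimal user-space usage sketch (illustrative only; it assumes the
 * IOCTL_* request codes exported by the QAT configuration headers):
 *
 *	int fd = open("/dev/qat_adf_ctl", O_RDWR);
 *	uint32_t num_devices = 0;
 *	if (fd >= 0 && ioctl(fd, IOCTL_GET_NUM_DEVICES, &num_devices) == 0)
 *		printf("%u QAT devices\n", num_devices);
 */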
static int adf_ctl_ioctl(struct cdev *dev,
			 u_long cmd,
			 caddr_t arg,
			 int fflag,
			 struct thread *td)
{
	int ret = 0;
	bool allowed = false;
	int i;
	static const unsigned int unrestricted_cmds[] = {
		IOCTL_GET_NUM_DEVICES,     IOCTL_STATUS_ACCEL_DEV,
		IOCTL_HEARTBEAT_ACCEL_DEV, IOCTL_GET_CFG_VAL,
		IOCTL_RESERVE_RING,        IOCTL_RELEASE_RING,
		IOCTL_ENABLE_RING,         IOCTL_DISABLE_RING,
	};

	if (priv_check(curthread, PRIV_DRIVER)) {
		for (i = 0; i < ARRAY_SIZE(unrestricted_cmds); i++) {
			if (cmd == unrestricted_cmds[i]) {
				allowed = true;
				break;
			}
		}
		if (!allowed)
			return EACCES;
	}

	/* All commands have an argument */
	if (!arg)
		return EFAULT;

	if (sx_xlock_sig(&adf_ctl_lock))
		return EINTR;

	switch (cmd) {
	case IOCTL_GET_NUM_DEVICES:
		ret = adf_ctl_ioctl_get_num_devices(cmd, arg);
		break;
	case IOCTL_STATUS_ACCEL_DEV:
		ret = adf_ctl_ioctl_get_status(cmd, arg);
		break;
	case IOCTL_GET_CFG_VAL:
		ret = adf_ctl_ioctl_dev_get_value(cmd, arg);
		break;
	case IOCTL_RESERVE_RING:
		ret = adf_ctl_ioctl_reserve_ring(arg);
		break;
	case IOCTL_RELEASE_RING:
		ret = adf_ctl_ioctl_release_ring(arg);
		break;
	case IOCTL_ENABLE_RING:
		ret = adf_ctl_ioctl_enable_ring(arg);
		break;
	case IOCTL_DISABLE_RING:
		ret = adf_ctl_ioctl_disable_ring(arg);
		break;
	case IOCTL_HEARTBEAT_ACCEL_DEV:
		ret = adf_ctl_ioctl_heartbeat(cmd, arg);
		break;
	default:
		printf("QAT: Invalid ioctl\n");
		ret = ENOTTY;
		break;
	}
	sx_xunlock(&adf_ctl_lock);
	return ret;
}

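/*
 * Driver load hook: initialize the control lock, create the qat_adf_ctl
 * node, set up ADF state handling and register the processes device.
 * Everything set up so far is torn down again if a later step fails.
 */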
int
adf_register_ctl_device_driver(void)
{
	sx_init(&adf_ctl_lock, "adf ctl");

	if (adf_chr_drv_create())
		goto err_chr_dev;

	adf_state_init();
	if (adf_processes_dev_register() != 0)
		goto err_processes_dev_register;
	return 0;

err_processes_dev_register:
	adf_state_destroy();
	adf_chr_drv_destroy();
err_chr_dev:
	sx_destroy(&adf_ctl_lock);
	return EFAULT;
}

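/*
 * Driver unload hook: tear down everything set up by
 * adf_register_ctl_device_driver() and clean the VF mapping table.
 */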
void
adf_unregister_ctl_device_driver(void)
{
	adf_processes_dev_unregister();
	adf_state_destroy();
	adf_chr_drv_destroy();
	adf_clean_vf_map(false);
	sx_destroy(&adf_ctl_lock);
}