xref: /linux/drivers/crypto/intel/qat/qat_common/adf_ctl_drv.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */

#include <crypto/algapi.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/bitops.h>
#include <linux/pci.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>

#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_cfg_common.h"
#include "adf_cfg_user.h"

#define ADF_CFG_MAX_SECTION 512
#define ADF_CFG_MAX_KEY_VAL 256

#define DEVICE_NAME "qat_adf_ctl"

static DEFINE_MUTEX(adf_ctl_lock);
static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);

static const struct file_operations adf_ctl_ops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = adf_ctl_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};

static const struct class adf_ctl_class = {
	.name = DEVICE_NAME,
};

struct adf_ctl_drv_info {
	unsigned int major;
	struct cdev drv_cdev;
};

static struct adf_ctl_drv_info adf_ctl_drv;

static void adf_chr_drv_destroy(void)
{
	device_destroy(&adf_ctl_class, MKDEV(adf_ctl_drv.major, 0));
	cdev_del(&adf_ctl_drv.drv_cdev);
	class_unregister(&adf_ctl_class);
	unregister_chrdev_region(MKDEV(adf_ctl_drv.major, 0), 1);
}

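/*
 * Register the control interface: reserve a char device region, register
 * the device class, hook up adf_ctl_ops through a cdev and create the
 * /dev/qat_adf_ctl node. Any failure unwinds the steps done so far and the
 * function returns -EFAULT.
 */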
static int adf_chr_drv_create(void)
{
	dev_t dev_id;
	struct device *drv_device;
	int ret;

	if (alloc_chrdev_region(&dev_id, 0, 1, DEVICE_NAME)) {
		pr_err("QAT: unable to allocate chrdev region\n");
		return -EFAULT;
	}

	ret = class_register(&adf_ctl_class);
	if (ret)
		goto err_chrdev_unreg;

	adf_ctl_drv.major = MAJOR(dev_id);
	cdev_init(&adf_ctl_drv.drv_cdev, &adf_ctl_ops);
	if (cdev_add(&adf_ctl_drv.drv_cdev, dev_id, 1)) {
		pr_err("QAT: cdev add failed\n");
		goto err_class_destr;
	}

	drv_device = device_create(&adf_ctl_class, NULL,
				   MKDEV(adf_ctl_drv.major, 0),
				   NULL, DEVICE_NAME);
	if (IS_ERR(drv_device)) {
		pr_err("QAT: failed to create device\n");
		goto err_cdev_del;
	}
	return 0;
err_cdev_del:
	cdev_del(&adf_ctl_drv.drv_cdev);
err_class_destr:
	class_unregister(&adf_ctl_class);
err_chrdev_unreg:
	unregister_chrdev_region(dev_id, 1);
	return -EFAULT;
}

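/*
 * Allocate a kernel copy of struct adf_user_cfg_ctl_data and fill it from
 * the user pointer passed in @arg. On success the caller owns *ctl_data
 * and must kfree() it.
 */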
static int adf_ctl_alloc_resources(struct adf_user_cfg_ctl_data **ctl_data,
				   unsigned long arg)
{
	struct adf_user_cfg_ctl_data *cfg_data;

	cfg_data = kzalloc(sizeof(*cfg_data), GFP_KERNEL);
	if (!cfg_data)
		return -ENOMEM;

	/* Initialize device id to NO DEVICE as 0 is a valid device id */
	cfg_data->device_id = ADF_CFG_NO_DEVICE;

	if (copy_from_user(cfg_data, (void __user *)arg, sizeof(*cfg_data))) {
		pr_err("QAT: failed to copy from user cfg_data.\n");
		kfree(cfg_data);
		return -EIO;
	}

	*ctl_data = cfg_data;
	return 0;
}

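/*
 * Add a single key/value pair to the configuration table of @accel_dev.
 * ADF_HEX entries carry a number inside key_val->val, so the number is
 * read out and passed as a value cast to a pointer; all other types pass
 * the value buffer through unchanged.
 */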
static int adf_add_key_value_data(struct adf_accel_dev *accel_dev,
				  const char *section,
				  const struct adf_user_cfg_key_val *key_val)
{
	if (key_val->type == ADF_HEX) {
		long *ptr = (long *)key_val->val;
		long val = *ptr;

		if (adf_cfg_add_key_value_param(accel_dev, section,
						key_val->key, (void *)val,
						key_val->type)) {
			dev_err(&GET_DEV(accel_dev),
				"failed to add hex keyvalue.\n");
			return -EFAULT;
		}
	} else {
		if (adf_cfg_add_key_value_param(accel_dev, section,
						key_val->key, key_val->val,
						key_val->type)) {
			dev_err(&GET_DEV(accel_dev),
				"failed to add keyvalue.\n");
			return -EFAULT;
		}
	}
	return 0;
}

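/*
 * Walk the user-space configuration: a linked list of sections, each with
 * a linked list of key/value pairs. Both lists are copied in element by
 * element (bounded by ADF_CFG_MAX_SECTION and ADF_CFG_MAX_KEY_VAL) and
 * mirrored into the in-kernel device configuration. Any failure wipes the
 * partially built configuration with adf_cfg_del_all().
 */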
static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
				   struct adf_user_cfg_ctl_data *ctl_data)
{
	struct adf_user_cfg_key_val key_val;
	struct adf_user_cfg_key_val *params_head;
	struct adf_user_cfg_section section, *section_head;
	int i, j;

	section_head = ctl_data->config_section;

	for (i = 0; section_head && i < ADF_CFG_MAX_SECTION; i++) {
		if (copy_from_user(&section, (void __user *)section_head,
				   sizeof(*section_head))) {
			dev_err(&GET_DEV(accel_dev),
				"failed to copy section info\n");
			goto out_err;
		}

		if (adf_cfg_section_add(accel_dev, section.name)) {
			dev_err(&GET_DEV(accel_dev),
				"failed to add section.\n");
			goto out_err;
		}

		params_head = section.params;

		for (j = 0; params_head && j < ADF_CFG_MAX_KEY_VAL; j++) {
			if (copy_from_user(&key_val, (void __user *)params_head,
					   sizeof(key_val))) {
				dev_err(&GET_DEV(accel_dev),
					"Failed to copy keyvalue.\n");
				goto out_err;
			}
			if (adf_add_key_value_data(accel_dev, section.name,
						   &key_val)) {
				goto out_err;
			}
			params_head = key_val.next;
		}
		section_head = section.next;
	}
	return 0;
out_err:
	adf_cfg_del_all(accel_dev);
	return -EFAULT;
}

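/*
 * IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS handler: copy the user-supplied
 * configuration into the named device and mark it as configured. The
 * device must exist and must not already be started.
 */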
static int adf_ctl_ioctl_dev_config(struct file *fp, unsigned int cmd,
				    unsigned long arg)
{
	int ret;
	struct adf_user_cfg_ctl_data *ctl_data;
	struct adf_accel_dev *accel_dev;

	ret = adf_ctl_alloc_resources(&ctl_data, arg);
	if (ret)
		return ret;

	accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
	if (!accel_dev) {
		ret = -EFAULT;
		goto out;
	}

	if (adf_dev_started(accel_dev)) {
		ret = -EFAULT;
		goto out;
	}

	if (adf_copy_key_value_data(accel_dev, ctl_data)) {
		ret = -EFAULT;
		goto out;
	}
	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
out:
	kfree(ctl_data);
	return ret;
}

static int adf_ctl_is_device_in_use(int id)
{
	struct adf_accel_dev *dev;

	list_for_each_entry(dev, adf_devmgr_get_head(), list) {
		if (id == dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
			if (adf_devmgr_in_reset(dev) || adf_dev_in_use(dev)) {
				dev_info(&GET_DEV(dev),
					 "device qat_dev%d is busy\n",
					 dev->accel_id);
				return -EBUSY;
			}
		}
	}
	return 0;
}

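/*
 * Bring down the selected device(s). Virtual functions are taken down in
 * the first pass so that they are gone before their physical functions are
 * stopped in the second pass.
 */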
static void adf_ctl_stop_devices(u32 id)
{
	struct adf_accel_dev *accel_dev;

	list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
		if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
			if (!adf_dev_started(accel_dev))
				continue;

			/* First stop all VFs */
			if (!accel_dev->is_vf)
				continue;

			adf_dev_down(accel_dev);
		}
	}

	list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
		if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
			if (!adf_dev_started(accel_dev))
				continue;

			adf_dev_down(accel_dev);
		}
	}
}

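/*
 * IOCTL_STOP_ACCEL_DEV handler: stop one device, or every device when the
 * id is ADF_CFG_ALL_DEVICES, provided none of the selected devices is in
 * reset or still in use.
 */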
static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd,
				  unsigned long arg)
{
	int ret;
	struct adf_user_cfg_ctl_data *ctl_data;

	ret = adf_ctl_alloc_resources(&ctl_data, arg);
	if (ret)
		return ret;

	if (adf_devmgr_verify_id(ctl_data->device_id)) {
		pr_err("QAT: Device %d not found\n", ctl_data->device_id);
		ret = -ENODEV;
		goto out;
	}

	ret = adf_ctl_is_device_in_use(ctl_data->device_id);
	if (ret)
		goto out;

	if (ctl_data->device_id == ADF_CFG_ALL_DEVICES)
		pr_info("QAT: Stopping all acceleration devices.\n");
	else
		pr_info("QAT: Stopping acceleration device qat_dev%d.\n",
			ctl_data->device_id);

	adf_ctl_stop_devices(ctl_data->device_id);

out:
	kfree(ctl_data);
	return ret;
}

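/*
 * IOCTL_START_ACCEL_DEV handler: bring the named device up. If the start
 * fails the device is brought back down before the error is returned.
 */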
static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
				   unsigned long arg)
{
	int ret;
	struct adf_user_cfg_ctl_data *ctl_data;
	struct adf_accel_dev *accel_dev;

	ret = adf_ctl_alloc_resources(&ctl_data, arg);
	if (ret)
		return ret;

	ret = -ENODEV;
	accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
	if (!accel_dev)
		goto out;

	dev_info(&GET_DEV(accel_dev),
		 "Starting acceleration device qat_dev%d.\n",
		 ctl_data->device_id);

	ret = adf_dev_up(accel_dev, false);

	if (ret) {
		dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
			ctl_data->device_id);
		adf_dev_down(accel_dev);
	}
out:
	kfree(ctl_data);
	return ret;
}

static int adf_ctl_ioctl_get_num_devices(struct file *fp, unsigned int cmd,
					 unsigned long arg)
{
	u32 num_devices = 0;

	adf_devmgr_get_num_dev(&num_devices);
	if (copy_to_user((void __user *)arg, &num_devices, sizeof(num_devices)))
		return -EFAULT;

	return 0;
}

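/*
 * IOCTL_STATUS_ACCEL_DEV handler: read the requested accel_id from user
 * space, fill in a struct adf_dev_status_info snapshot (state, engine and
 * accelerator counts, device class/type and PCI address) and copy it back.
 */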
static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd,
				    unsigned long arg)
{
	struct adf_hw_device_data *hw_data;
	struct adf_dev_status_info dev_info;
	struct adf_accel_dev *accel_dev;

	if (copy_from_user(&dev_info, (void __user *)arg,
			   sizeof(struct adf_dev_status_info))) {
		pr_err("QAT: failed to copy from user.\n");
		return -EFAULT;
	}

	accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id);
	if (!accel_dev)
		return -ENODEV;

	hw_data = accel_dev->hw_device;
	dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN;
	dev_info.num_ae = hw_data->get_num_aes(hw_data);
	dev_info.num_accel = hw_data->get_num_accels(hw_data);
	dev_info.num_logical_accel = hw_data->num_logical_accel;
	dev_info.banks_per_accel = hw_data->num_banks
					/ hw_data->num_logical_accel;
	strscpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name));
	dev_info.instance_id = hw_data->instance_id;
	dev_info.type = hw_data->dev_class->type;
	dev_info.bus = accel_to_pci_dev(accel_dev)->bus->number;
	dev_info.dev = PCI_SLOT(accel_to_pci_dev(accel_dev)->devfn);
	dev_info.fun = PCI_FUNC(accel_to_pci_dev(accel_dev)->devfn);

	if (copy_to_user((void __user *)arg, &dev_info,
			 sizeof(struct adf_dev_status_info))) {
		dev_err(&GET_DEV(accel_dev), "failed to copy status.\n");
		return -EFAULT;
	}
	return 0;
}

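/*
 * Top-level ioctl dispatcher for /dev/qat_adf_ctl. All commands are
 * serialized by adf_ctl_lock.
 *
 * Minimal user-space sketch (illustration only; it assumes the IOCTL_*
 * numbers and structures from adf_cfg_common.h are visible to the caller):
 *
 *	int fd = open("/dev/qat_adf_ctl", O_RDWR);
 *	uint32_t num_devices = 0;
 *
 *	if (fd >= 0 && !ioctl(fd, IOCTL_GET_NUM_DEVICES, &num_devices))
 *		printf("%u QAT device(s)\n", num_devices);
 */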
static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
	int ret;

	if (mutex_lock_interruptible(&adf_ctl_lock))
		return -EFAULT;

	switch (cmd) {
	case IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS:
		ret = adf_ctl_ioctl_dev_config(fp, cmd, arg);
		break;

	case IOCTL_STOP_ACCEL_DEV:
		ret = adf_ctl_ioctl_dev_stop(fp, cmd, arg);
		break;

	case IOCTL_START_ACCEL_DEV:
		ret = adf_ctl_ioctl_dev_start(fp, cmd, arg);
		break;

	case IOCTL_GET_NUM_DEVICES:
		ret = adf_ctl_ioctl_get_num_devices(fp, cmd, arg);
		break;

	case IOCTL_STATUS_ACCEL_DEV:
		ret = adf_ctl_ioctl_get_status(fp, cmd, arg);
		break;
	default:
		pr_err_ratelimited("QAT: Invalid ioctl %d\n", cmd);
		ret = -EFAULT;
		break;
	}
	mutex_unlock(&adf_ctl_lock);
	return ret;
}

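/*
 * Module init: create the control device and bring up the common QAT
 * infrastructure (misc, PF and VF workqueues, AER handling) before
 * registering the crypto and compression services. Errors unwind the
 * completed steps in reverse order.
 */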
static int __init adf_register_ctl_device_driver(void)
{
	if (adf_chr_drv_create())
		goto err_chr_dev;

	if (adf_init_misc_wq())
		goto err_misc_wq;

	if (adf_init_aer())
		goto err_aer;

	if (adf_init_pf_wq())
		goto err_pf_wq;

	if (adf_init_vf_wq())
		goto err_vf_wq;

	if (qat_crypto_register())
		goto err_crypto_register;

	if (qat_compression_register())
		goto err_compression_register;

	return 0;

err_compression_register:
	qat_crypto_unregister();
err_crypto_register:
	adf_exit_vf_wq();
err_vf_wq:
	adf_exit_pf_wq();
err_pf_wq:
	adf_exit_aer();
err_aer:
	adf_exit_misc_wq();
err_misc_wq:
	adf_chr_drv_destroy();
err_chr_dev:
	mutex_destroy(&adf_ctl_lock);
	return -EFAULT;
}

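/* Module exit: tear down everything the init path set up. */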
static void __exit adf_unregister_ctl_device_driver(void)
{
	adf_chr_drv_destroy();
	adf_exit_misc_wq();
	adf_exit_aer();
	adf_exit_vf_wq();
	adf_exit_pf_wq();
	qat_crypto_unregister();
	qat_compression_unregister();
	adf_clean_vf_map(false);
	mutex_destroy(&adf_ctl_lock);
}

module_init(adf_register_ctl_device_driver);
module_exit(adf_unregister_ctl_device_driver);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_ALIAS_CRYPTO("intel_qat");
MODULE_VERSION(ADF_DRV_VERSION);
MODULE_IMPORT_NS(CRYPTO_INTERNAL);