// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */

#include <crypto/algapi.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/bitops.h>
#include <linux/pci.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>

#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_cfg_common.h"
#include "adf_cfg_user.h"

#define ADF_CFG_MAX_SECTION 512
#define ADF_CFG_MAX_KEY_VAL 256

#define DEVICE_NAME "qat_adf_ctl"

static DEFINE_MUTEX(adf_ctl_lock);
static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);

static const struct file_operations adf_ctl_ops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = adf_ctl_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};

static const struct class adf_ctl_class = {
	.name = DEVICE_NAME,
};

struct adf_ctl_drv_info {
	unsigned int major;
	struct cdev drv_cdev;
};

static struct adf_ctl_drv_info adf_ctl_drv;

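/*
 * Tear down the qat_adf_ctl character device: remove the device node,
 * delete the cdev, unregister the class and release the chrdev region.
 */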
static void adf_chr_drv_destroy(void)
{
	device_destroy(&adf_ctl_class, MKDEV(adf_ctl_drv.major, 0));
	cdev_del(&adf_ctl_drv.drv_cdev);
	class_unregister(&adf_ctl_class);
	unregister_chrdev_region(MKDEV(adf_ctl_drv.major, 0), 1);
}

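/*
 * Register the qat_adf_ctl character device: allocate a chrdev region,
 * register the class, add the cdev and create the /dev node. On failure
 * the steps already completed are unwound in reverse order.
 */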
static int adf_chr_drv_create(void)
{
	dev_t dev_id;
	struct device *drv_device;
	int ret;

	if (alloc_chrdev_region(&dev_id, 0, 1, DEVICE_NAME)) {
		pr_err("QAT: unable to allocate chrdev region\n");
		return -EFAULT;
	}

	ret = class_register(&adf_ctl_class);
	if (ret)
		goto err_chrdev_unreg;

	adf_ctl_drv.major = MAJOR(dev_id);
	cdev_init(&adf_ctl_drv.drv_cdev, &adf_ctl_ops);
	if (cdev_add(&adf_ctl_drv.drv_cdev, dev_id, 1)) {
		pr_err("QAT: cdev add failed\n");
		goto err_class_destr;
	}

	drv_device = device_create(&adf_ctl_class, NULL,
				   MKDEV(adf_ctl_drv.major, 0),
				   NULL, DEVICE_NAME);
	if (IS_ERR(drv_device)) {
		pr_err("QAT: failed to create device\n");
		goto err_cdev_del;
	}
	return 0;
err_cdev_del:
	cdev_del(&adf_ctl_drv.drv_cdev);
err_class_destr:
	class_unregister(&adf_ctl_class);
err_chrdev_unreg:
	unregister_chrdev_region(dev_id, 1);
	return -EFAULT;
}

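/*
 * Copy the ioctl payload (struct adf_user_cfg_ctl_data) from user space
 * into a kernel buffer. Returns the buffer or an ERR_PTR(); the caller is
 * responsible for freeing it with kfree().
 */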
static struct adf_user_cfg_ctl_data *adf_ctl_alloc_resources(unsigned long arg)
{
	struct adf_user_cfg_ctl_data *cfg_data;

	cfg_data = memdup_user((void __user *)arg, sizeof(*cfg_data));
	if (IS_ERR(cfg_data))
		pr_err("QAT: failed to copy from user cfg_data.\n");
	return cfg_data;
}

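/*
 * Add a single key/value pair to the device configuration. For ADF_HEX
 * entries the numeric value itself is passed down instead of a pointer to
 * the value buffer.
 */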
static int adf_add_key_value_data(struct adf_accel_dev *accel_dev,
				  const char *section,
				  const struct adf_user_cfg_key_val *key_val)
{
	if (key_val->type == ADF_HEX) {
		long *ptr = (long *)key_val->val;
		long val = *ptr;

		if (adf_cfg_add_key_value_param(accel_dev, section,
						key_val->key, (void *)val,
						key_val->type)) {
			dev_err(&GET_DEV(accel_dev),
				"failed to add hex keyvalue.\n");
			return -EFAULT;
		}
	} else {
		if (adf_cfg_add_key_value_param(accel_dev, section,
						key_val->key, key_val->val,
						key_val->type)) {
			dev_err(&GET_DEV(accel_dev),
				"failed to add keyvalue.\n");
			return -EFAULT;
		}
	}
	return 0;
}

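/*
 * Walk the user-space lists of configuration sections and key/value pairs
 * referenced by ctl_data, copying each element into the kernel and adding
 * it to the device configuration. The walks are bounded by
 * ADF_CFG_MAX_SECTION and ADF_CFG_MAX_KEY_VAL so a malformed list cannot
 * loop forever. On any failure the whole configuration is discarded.
 */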
static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
				   struct adf_user_cfg_ctl_data *ctl_data)
{
	struct adf_user_cfg_key_val key_val;
	struct adf_user_cfg_key_val *params_head;
	struct adf_user_cfg_section section, *section_head;
	int i, j;

	section_head = ctl_data->config_section;

	for (i = 0; section_head && i < ADF_CFG_MAX_SECTION; i++) {
		if (copy_from_user(&section, (void __user *)section_head,
				   sizeof(*section_head))) {
			dev_err(&GET_DEV(accel_dev),
				"failed to copy section info\n");
			goto out_err;
		}

		if (adf_cfg_section_add(accel_dev, section.name)) {
			dev_err(&GET_DEV(accel_dev),
				"failed to add section.\n");
			goto out_err;
		}

		params_head = section.params;

		for (j = 0; params_head && j < ADF_CFG_MAX_KEY_VAL; j++) {
			if (copy_from_user(&key_val, (void __user *)params_head,
					   sizeof(key_val))) {
				dev_err(&GET_DEV(accel_dev),
					"Failed to copy keyvalue.\n");
				goto out_err;
			}
			if (adf_add_key_value_data(accel_dev, section.name,
						   &key_val)) {
				goto out_err;
			}
			params_head = key_val.next;
		}
		section_head = section.next;
	}
	return 0;
out_err:
	adf_cfg_del_all(accel_dev);
	return -EFAULT;
}

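/*
 * IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS handler: load a user-supplied
 * configuration into a device that has not been started yet and mark the
 * device as configured.
 */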
static int adf_ctl_ioctl_dev_config(struct file *fp, unsigned int cmd,
				    unsigned long arg)
{
	struct adf_user_cfg_ctl_data *ctl_data;
	struct adf_accel_dev *accel_dev;
	int ret = 0;

	ctl_data = adf_ctl_alloc_resources(arg);
	if (IS_ERR(ctl_data))
		return PTR_ERR(ctl_data);

	accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
	if (!accel_dev) {
		ret = -EFAULT;
		goto out;
	}

	if (adf_dev_started(accel_dev)) {
		ret = -EFAULT;
		goto out;
	}

	if (adf_copy_key_value_data(accel_dev, ctl_data)) {
		ret = -EFAULT;
		goto out;
	}
	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
out:
	kfree(ctl_data);
	return ret;
}

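/*
 * Return -EBUSY if the selected device (or, for ADF_CFG_ALL_DEVICES, any
 * registered device) is currently in reset or has active references.
 */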
static int adf_ctl_is_device_in_use(int id)
{
	struct adf_accel_dev *dev;

	list_for_each_entry(dev, adf_devmgr_get_head(), list) {
		if (id == dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
			if (adf_devmgr_in_reset(dev) || adf_dev_in_use(dev)) {
				dev_info(&GET_DEV(dev),
					 "device qat_dev%d is busy\n",
					 dev->accel_id);
				return -EBUSY;
			}
		}
	}
	return 0;
}

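/*
 * Bring down the selected device(s) in two passes: virtual function (VF)
 * devices first, then the remaining devices.
 */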
static void adf_ctl_stop_devices(u32 id)
{
	struct adf_accel_dev *accel_dev;

	list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
		if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
			if (!adf_dev_started(accel_dev))
				continue;

			/* First stop all VFs */
			if (!accel_dev->is_vf)
				continue;

			adf_dev_down(accel_dev);
		}
	}

	list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
		if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
			if (!adf_dev_started(accel_dev))
				continue;

			adf_dev_down(accel_dev);
		}
	}
}

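/*
 * IOCTL_STOP_ACCEL_DEV handler: validate the device id, refuse to stop
 * devices that are busy, and otherwise bring the selected device(s) down.
 */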
static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd,
				  unsigned long arg)
{
	int ret;
	struct adf_user_cfg_ctl_data *ctl_data;

	ctl_data = adf_ctl_alloc_resources(arg);
	if (IS_ERR(ctl_data))
		return PTR_ERR(ctl_data);

	if (adf_devmgr_verify_id(ctl_data->device_id)) {
		pr_err("QAT: Device %d not found\n", ctl_data->device_id);
		ret = -ENODEV;
		goto out;
	}

	ret = adf_ctl_is_device_in_use(ctl_data->device_id);
	if (ret)
		goto out;

	if (ctl_data->device_id == ADF_CFG_ALL_DEVICES)
		pr_info("QAT: Stopping all acceleration devices.\n");
	else
		pr_info("QAT: Stopping acceleration device qat_dev%d.\n",
			ctl_data->device_id);

	adf_ctl_stop_devices(ctl_data->device_id);

out:
	kfree(ctl_data);
	return ret;
}

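/*
 * IOCTL_START_ACCEL_DEV handler: bring the selected device up. If the
 * start fails, the device is brought back down to leave it in a clean
 * state.
 */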
static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
				   unsigned long arg)
{
	int ret;
	struct adf_user_cfg_ctl_data *ctl_data;
	struct adf_accel_dev *accel_dev;

	ctl_data = adf_ctl_alloc_resources(arg);
	if (IS_ERR(ctl_data))
		return PTR_ERR(ctl_data);

	ret = -ENODEV;
	accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
	if (!accel_dev)
		goto out;

	dev_info(&GET_DEV(accel_dev),
		 "Starting acceleration device qat_dev%d.\n",
		 ctl_data->device_id);

	ret = adf_dev_up(accel_dev, false);

	if (ret) {
		dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
			ctl_data->device_id);
		adf_dev_down(accel_dev);
	}
out:
	kfree(ctl_data);
	return ret;
}

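/* IOCTL_GET_NUM_DEVICES handler: report the number of registered devices. */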
static int adf_ctl_ioctl_get_num_devices(struct file *fp, unsigned int cmd,
					 unsigned long arg)
{
	u32 num_devices = 0;

	adf_devmgr_get_num_dev(&num_devices);
	if (copy_to_user((void __user *)arg, &num_devices, sizeof(num_devices)))
		return -EFAULT;

	return 0;
}

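/*
 * IOCTL_STATUS_ACCEL_DEV handler: fill a struct adf_dev_status_info with
 * the state, topology and PCI address of the requested device and copy it
 * back to user space.
 */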
static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd,
				    unsigned long arg)
{
	struct adf_hw_device_data *hw_data;
	struct adf_dev_status_info dev_info;
	struct adf_accel_dev *accel_dev;

	if (copy_from_user(&dev_info, (void __user *)arg,
			   sizeof(struct adf_dev_status_info))) {
		pr_err("QAT: failed to copy from user.\n");
		return -EFAULT;
	}

	accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id);
	if (!accel_dev)
		return -ENODEV;

	hw_data = accel_dev->hw_device;
	dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN;
	dev_info.num_ae = hw_data->get_num_aes(hw_data);
	dev_info.num_accel = hw_data->get_num_accels(hw_data);
	dev_info.num_logical_accel = hw_data->num_logical_accel;
	dev_info.banks_per_accel = hw_data->num_banks
					/ hw_data->num_logical_accel;
	strscpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name));
	dev_info.instance_id = hw_data->instance_id;
	dev_info.type = hw_data->dev_class->type;
	dev_info.bus = accel_to_pci_dev(accel_dev)->bus->number;
	dev_info.dev = PCI_SLOT(accel_to_pci_dev(accel_dev)->devfn);
	dev_info.fun = PCI_FUNC(accel_to_pci_dev(accel_dev)->devfn);

	if (copy_to_user((void __user *)arg, &dev_info,
			 sizeof(struct adf_dev_status_info))) {
		dev_err(&GET_DEV(accel_dev), "failed to copy status.\n");
		return -EFAULT;
	}
	return 0;
}

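/*
 * Top-level ioctl dispatcher for /dev/qat_adf_ctl. All commands are
 * serialized by adf_ctl_lock.
 */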
static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
	int ret;

	if (mutex_lock_interruptible(&adf_ctl_lock))
		return -EFAULT;

	switch (cmd) {
	case IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS:
		ret = adf_ctl_ioctl_dev_config(fp, cmd, arg);
		break;

	case IOCTL_STOP_ACCEL_DEV:
		ret = adf_ctl_ioctl_dev_stop(fp, cmd, arg);
		break;

	case IOCTL_START_ACCEL_DEV:
		ret = adf_ctl_ioctl_dev_start(fp, cmd, arg);
		break;

	case IOCTL_GET_NUM_DEVICES:
		ret = adf_ctl_ioctl_get_num_devices(fp, cmd, arg);
		break;

	case IOCTL_STATUS_ACCEL_DEV:
		ret = adf_ctl_ioctl_get_status(fp, cmd, arg);
		break;
	default:
		pr_err_ratelimited("QAT: Invalid ioctl %d\n", cmd);
		ret = -EFAULT;
		break;
	}
	mutex_unlock(&adf_ctl_lock);
	return ret;
}

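/*
 * Module init: create the control device and bring up the common QAT
 * infrastructure (workqueues, AER, crypto and compression registration).
 * On failure everything registered so far is unwound in reverse order.
 */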
static int __init adf_register_ctl_device_driver(void)
{
	if (adf_chr_drv_create())
		goto err_chr_dev;

	if (adf_init_misc_wq())
		goto err_misc_wq;

	if (adf_init_aer())
		goto err_aer;

	if (adf_init_pf_wq())
		goto err_pf_wq;

	if (adf_init_vf_wq())
		goto err_vf_wq;

	if (qat_crypto_register())
		goto err_crypto_register;

	if (qat_compression_register())
		goto err_compression_register;

	return 0;

err_compression_register:
	qat_crypto_unregister();
err_crypto_register:
	adf_exit_vf_wq();
err_vf_wq:
	adf_exit_pf_wq();
err_pf_wq:
	adf_exit_aer();
err_aer:
	adf_exit_misc_wq();
err_misc_wq:
	adf_chr_drv_destroy();
err_chr_dev:
	mutex_destroy(&adf_ctl_lock);
	return -EFAULT;
}

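/*
 * Module exit: tear down the control device and the common QAT
 * infrastructure registered at init time.
 */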
static void __exit adf_unregister_ctl_device_driver(void)
{
	adf_chr_drv_destroy();
	adf_exit_misc_wq();
	adf_exit_aer();
	adf_exit_vf_wq();
	adf_exit_pf_wq();
	qat_crypto_unregister();
	qat_compression_unregister();
	adf_clean_vf_map(false);
	mutex_destroy(&adf_ctl_lock);
}

module_init(adf_register_ctl_device_driver);
module_exit(adf_unregister_ctl_device_driver);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_ALIAS_CRYPTO("intel_qat");
MODULE_VERSION(ADF_DRV_VERSION);
MODULE_IMPORT_NS("CRYPTO_INTERNAL");