// SPDX-License-Identifier: GPL-2.0-or-later
/* Management for virtio crypto devices (refer to adf_dev_mgr.c)
 *
 * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
 */

#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/module.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"

static LIST_HEAD(virtio_crypto_table);
static uint32_t num_devices;

/* The table_lock protects the above global list and num_devices */
static DEFINE_MUTEX(table_lock);

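/* Compile-time cap on how many virtio crypto devices the framework tracks. */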
#define VIRTIO_CRYPTO_MAX_DEVICES 32

/*
 * virtcrypto_devmgr_add_dev() - Add vcrypto_dev to the acceleration
 * framework.
 * @vcrypto_dev:  Pointer to virtio crypto device.
 *
 * Function adds virtio crypto device to the global list.
 * To be used by virtio crypto device specific drivers.
 *
 * Return: 0 on success, error code otherwise.
 */
int virtcrypto_devmgr_add_dev(struct virtio_crypto *vcrypto_dev)
{
	struct list_head *itr;

	mutex_lock(&table_lock);
	if (num_devices == VIRTIO_CRYPTO_MAX_DEVICES) {
		pr_info("virtio_crypto: only support up to %d devices\n",
			    VIRTIO_CRYPTO_MAX_DEVICES);
		mutex_unlock(&table_lock);
		return -EFAULT;
	}

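	/* Reject a device that is already on the global list. */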
	list_for_each(itr, &virtio_crypto_table) {
		struct virtio_crypto *ptr =
				list_entry(itr, struct virtio_crypto, list);

		if (ptr == vcrypto_dev) {
			mutex_unlock(&table_lock);
			return -EEXIST;
		}
	}
	atomic_set(&vcrypto_dev->ref_count, 0);
	list_add_tail(&vcrypto_dev->list, &virtio_crypto_table);
	vcrypto_dev->dev_id = num_devices++;
	mutex_unlock(&table_lock);
	return 0;
}
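
/*
 * Example (illustrative sketch, not taken from an in-tree caller): a
 * device-specific probe path could register a freshly allocated device
 * roughly like this, assuming vcrypto->owner has been set to THIS_MODULE:
 *
 *	err = virtcrypto_devmgr_add_dev(vcrypto);
 *	if (err) {
 *		dev_err(&vdev->dev, "failed to add virtio crypto device\n");
 *		return err;
 *	}
 */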

struct list_head *virtcrypto_devmgr_get_head(void)
{
	return &virtio_crypto_table;
}

/*
 * virtcrypto_devmgr_rm_dev() - Remove vcrypto_dev from the acceleration
 * framework.
 * @vcrypto_dev:  Pointer to virtio crypto device.
 *
 * Function removes virtio crypto device from the acceleration framework.
 * To be used by virtio crypto device specific drivers.
 *
 * Return: void
 */
void virtcrypto_devmgr_rm_dev(struct virtio_crypto *vcrypto_dev)
{
	mutex_lock(&table_lock);
	list_del(&vcrypto_dev->list);
	num_devices--;
	mutex_unlock(&table_lock);
}
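
/*
 * Example (illustrative sketch): the matching remove/teardown path would
 * normally stop the device and then drop it from the framework before
 * freeing it:
 *
 *	virtcrypto_dev_stop(vcrypto);
 *	virtcrypto_devmgr_rm_dev(vcrypto);
 */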

/*
 * virtcrypto_dev_get() - Increment vcrypto_dev reference count
 * @vcrypto_dev: Pointer to virtio crypto device.
 *
 * Increment the vcrypto_dev refcount and, if this is the first reference
 * taken while the vcrypto_dev is in use, increment the module refcount too.
 * To be used by virtio crypto device specific drivers.
 *
 * Return: 0 when successful, -EFAULT when the module refcount cannot be taken
 */
int virtcrypto_dev_get(struct virtio_crypto *vcrypto_dev)
{
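	/* The first active user also pins the owning module in memory. */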
	if (atomic_add_return(1, &vcrypto_dev->ref_count) == 1)
		if (!try_module_get(vcrypto_dev->owner))
			return -EFAULT;
	return 0;
}

/*
 * virtcrypto_dev_put() - Decrement vcrypto_dev reference count
 * @vcrypto_dev: Pointer to virtio crypto device.
 *
 * Decrement the vcrypto_dev refcount and, if this drops the last reference
 * taken while the vcrypto_dev was in use, decrement the module refcount too.
 * To be used by virtio crypto device specific drivers.
 *
 * Return: void
 */
void virtcrypto_dev_put(struct virtio_crypto *vcrypto_dev)
{
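	/* Drop the module reference once the last user is gone. */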
	if (atomic_sub_return(1, &vcrypto_dev->ref_count) == 0)
		module_put(vcrypto_dev->owner);
}

/*
 * virtcrypto_dev_started() - Check whether device has started
 * @vcrypto_dev: Pointer to virtio crypto device.
 *
 * To be used by virtio crypto device specific drivers.
 *
 * Return: 1 when the device has started, 0 otherwise
 */
int virtcrypto_dev_started(struct virtio_crypto *vcrypto_dev)
{
	return (vcrypto_dev->status & VIRTIO_CRYPTO_S_HW_READY);
}

/*
 * virtcrypto_get_dev_node() - Get vcrypto_dev on the node.
 * @node: NUMA node id on which the driver is working.
 * @service: Crypto service that needs to be supported by the dev.
 * @algo: The algorithm number that needs to be supported by the dev.
 *
 * Function returns the least-used virtio crypto device on the given node
 * that supports the given crypto service and algorithm.
 *
 * To be used by virtio crypto device specific drivers.
 *
 * Return: pointer to vcrypto_dev or NULL if not found.
 */
struct virtio_crypto *virtcrypto_get_dev_node(int node, uint32_t service,
					      uint32_t algo)
{
	struct virtio_crypto *vcrypto_dev = NULL, *tmp_dev;
	unsigned long best = ~0;
	unsigned long ctr;

	mutex_lock(&table_lock);
	list_for_each_entry(tmp_dev, virtcrypto_devmgr_get_head(), list) {

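		/*
		 * Prefer a started device on the caller's node; a negative
		 * node id means the device has no NUMA affinity.
		 */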
		if ((node == dev_to_node(&tmp_dev->vdev->dev) ||
		     dev_to_node(&tmp_dev->vdev->dev) < 0) &&
		    virtcrypto_dev_started(tmp_dev) &&
		    virtcrypto_algo_is_supported(tmp_dev, service, algo)) {
			ctr = atomic_read(&tmp_dev->ref_count);
			if (best > ctr) {
				vcrypto_dev = tmp_dev;
				best = ctr;
			}
		}
	}

	if (!vcrypto_dev) {
		pr_info("virtio_crypto: Could not find a device on node %d\n",
				node);
		/* Get any started device */
		list_for_each_entry(tmp_dev,
				virtcrypto_devmgr_get_head(), list) {
			if (virtcrypto_dev_started(tmp_dev) &&
			    virtcrypto_algo_is_supported(tmp_dev,
			    service, algo)) {
				vcrypto_dev = tmp_dev;
				break;
			}
		}
	}
	mutex_unlock(&table_lock);
	if (!vcrypto_dev)
		return NULL;

	virtcrypto_dev_get(vcrypto_dev);
	return vcrypto_dev;
}
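
/*
 * Example (illustrative sketch): a session-creation path could pick the
 * least-busy capable device for an AES-CBC cipher session like this:
 *
 *	vcrypto = virtcrypto_get_dev_node(node,
 *					  VIRTIO_CRYPTO_SERVICE_CIPHER,
 *					  VIRTIO_CRYPTO_CIPHER_AES_CBC);
 *	if (!vcrypto)
 *		return -ENODEV;
 *
 * The reference taken here is dropped with virtcrypto_dev_put() once the
 * session is torn down.
 */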

/*
 * virtcrypto_dev_start() - Start virtio crypto device
 * @vcrypto:    Pointer to virtio crypto device.
 *
 * Function notifies all the registered services that the virtio crypto device
 * is ready to be used.
 * To be used by virtio crypto device specific drivers.
 *
 * Return: 0 on success, -EFAULT when the crypto algorithms fail to register
 */
int virtcrypto_dev_start(struct virtio_crypto *vcrypto)
{
	if (virtio_crypto_skcipher_algs_register(vcrypto)) {
		pr_err("virtio_crypto: Failed to register crypto skcipher algs\n");
		return -EFAULT;
	}

	if (virtio_crypto_akcipher_algs_register(vcrypto)) {
		pr_err("virtio_crypto: Failed to register crypto akcipher algs\n");
		virtio_crypto_skcipher_algs_unregister(vcrypto);
		return -EFAULT;
	}

	return 0;
}
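
/*
 * Example (illustrative sketch): a device-specific probe path would
 * typically start the device once its configuration has been read, and
 * unwind on failure:
 *
 *	err = virtcrypto_dev_start(vcrypto);
 *	if (err) {
 *		dev_err(&vdev->dev, "failed to start virtio crypto device\n");
 *		goto free_dev;
 *	}
 */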

/*
 * virtcrypto_dev_stop() - Stop virtio crypto device
 * @vcrypto:    Pointer to virtio crypto device.
 *
 * Function notifies all the registered services that the virtio crypto device
 * shall no longer be used.
 * To be used by virtio crypto device specific drivers.
 *
 * Return: void
 */
void virtcrypto_dev_stop(struct virtio_crypto *vcrypto)
{
	virtio_crypto_skcipher_algs_unregister(vcrypto);
	virtio_crypto_akcipher_algs_unregister(vcrypto);
}

/*
 * virtcrypto_algo_is_supported() - Check whether a service/algorithm
 * pair is supported.
 * @vcrypto: Pointer to virtio crypto device.
 * @service: The bit number of the service to validate.
 *	     See VIRTIO_CRYPTO_SERVICE_*.
 * @algo: The bit number of the algorithm to validate.
 *
 * Validate whether the virtio crypto device supports the given service
 * and algorithm.
 *
 * Return: true if the device supports the service and algorithm.
 */
bool virtcrypto_algo_is_supported(struct virtio_crypto *vcrypto,
				  uint32_t service,
				  uint32_t algo)
{
	uint32_t service_mask = 1u << service;
	uint32_t algo_mask = 0;
	bool low = true;

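	/*
	 * Algorithm bitmaps are 64 bits wide; for the CIPHER and MAC services
	 * they are split across the _l (bits 0-31) and _h (bits 32-63)
	 * device config fields.
	 */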
	if (algo > 31) {
		algo -= 32;
		low = false;
	}

	if (!(vcrypto->crypto_services & service_mask))
		return false;

	switch (service) {
	case VIRTIO_CRYPTO_SERVICE_CIPHER:
		if (low)
			algo_mask = vcrypto->cipher_algo_l;
		else
			algo_mask = vcrypto->cipher_algo_h;
		break;

	case VIRTIO_CRYPTO_SERVICE_HASH:
		algo_mask = vcrypto->hash_algo;
		break;

	case VIRTIO_CRYPTO_SERVICE_MAC:
		if (low)
			algo_mask = vcrypto->mac_algo_l;
		else
			algo_mask = vcrypto->mac_algo_h;
		break;

	case VIRTIO_CRYPTO_SERVICE_AEAD:
		algo_mask = vcrypto->aead_algo;
		break;

	case VIRTIO_CRYPTO_SERVICE_AKCIPHER:
		algo_mask = vcrypto->akcipher_algo;
		break;
	}

	if (!(algo_mask & (1u << algo)))
		return false;

	return true;
}
297