xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/lib/crypto.c (revision 9a87ffc99ec8eb8d35eed7c4f816d75f5cc9662e)
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 // Copyright (c) 2019 Mellanox Technologies.
3 
4 #include "mlx5_core.h"
5 #include "lib/crypto.h"
6 
7 #define MLX5_CRYPTO_DEK_POOLS_NUM (MLX5_ACCEL_OBJ_TYPE_KEY_NUM - 1)
8 #define type2idx(type) ((type) - 1)
9 
10 #define MLX5_CRYPTO_DEK_POOL_SYNC_THRESH 128
11 
12 /* Calculate the number of DEKs that were freed by any user
13  * (for example, TLS) since the last revalidation of a pool or a bulk.
14  */
15 #define MLX5_CRYPTO_DEK_CALC_FREED(a) \
16 	({ typeof(a) _a = (a); \
17 	   _a->num_deks - _a->avail_deks - _a->in_use_deks; })
18 
19 #define MLX5_CRYPTO_DEK_POOL_CALC_FREED(pool) MLX5_CRYPTO_DEK_CALC_FREED(pool)
20 #define MLX5_CRYPTO_DEK_BULK_CALC_FREED(bulk) MLX5_CRYPTO_DEK_CALC_FREED(bulk)
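/* Illustrative example (values are hypothetical): a pool with
 * num_deks = 4096, avail_deks = 3000 and in_use_deks = 800 has
 * 4096 - 3000 - 800 = 296 DEKs freed since the last sync. That is above
 * MLX5_CRYPTO_DEK_POOL_SYNC_THRESH, so the pool becomes eligible for a
 * SYNC_CRYPTO cycle (see mlx5_crypto_dek_need_sync() below).
 */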
21 
22 #define MLX5_CRYPTO_DEK_BULK_IDLE(bulk) \
23 	({ typeof(bulk) _bulk = (bulk); \
24 	   _bulk->avail_deks == _bulk->num_deks; })
25 
26 enum {
27 	MLX5_CRYPTO_DEK_ALL_TYPE = BIT(0),
28 };
29 
30 struct mlx5_crypto_dek_pool {
31 	struct mlx5_core_dev *mdev;
32 	u32 key_purpose;
33 	int num_deks; /* the total number of keys in this pool */
34 	int avail_deks; /* the number of available keys in this pool */
35 	int in_use_deks; /* the number of keys currently in use in this pool */
36 	struct mutex lock; /* protect the following lists, and the bulks */
37 	struct list_head partial_list; /* some of keys are available */
38 	struct list_head full_list; /* no available keys */
39 	struct list_head avail_list; /* all keys are available to use */
40 
41 	/* No keys are in use, and all of them need to be synced.
42 	 * These bulks will be moved to the avail list after sync.
43 	 */
44 	struct list_head sync_list;
45 
46 	bool syncing;
47 	struct list_head wait_for_free;
48 	struct work_struct sync_work;
49 
50 	spinlock_t destroy_lock; /* protect destroy_list */
51 	struct list_head destroy_list;
52 	struct work_struct destroy_work;
53 };
54 
55 struct mlx5_crypto_dek_bulk {
56 	struct mlx5_core_dev *mdev;
57 	int base_obj_id;
58 	int avail_start; /* the bit to start search */
59 	int num_deks; /* the total number of keys in a bulk */
60 	int avail_deks; /* the number of keys available, with need_sync bit 0 */
61 	int in_use_deks; /* the number of keys being used, with in_use bit 1 */
62 	struct list_head entry;
63 
64 	/* 0: not being used by any user, 1: otherwise */
65 	unsigned long *in_use;
66 
67 	/* The bits are set when the keys are used, and reset after crypto_sync
68 	 * is executed. So, the value 0 means the key is newly created or not
69 	 * used since the last sync, and 1 means it is in use, or freed but not synced.
70 	 */
71 	unsigned long *need_sync;
72 };
73 
74 struct mlx5_crypto_dek_priv {
75 	struct mlx5_core_dev *mdev;
76 	int log_dek_obj_range;
77 };
78 
79 struct mlx5_crypto_dek {
80 	struct mlx5_crypto_dek_bulk *bulk;
81 	struct list_head entry;
82 	u32 obj_id;
83 };
84 
85 u32 mlx5_crypto_dek_get_id(struct mlx5_crypto_dek *dek)
86 {
87 	return dek->obj_id;
88 }
89 
90 static int mlx5_crypto_dek_get_key_sz(struct mlx5_core_dev *mdev,
91 				      u32 sz_bytes, u8 *key_sz_p)
92 {
93 	u32 sz_bits = sz_bytes * BITS_PER_BYTE;
94 
95 	switch (sz_bits) {
96 	case 128:
97 		*key_sz_p = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_128;
98 		break;
99 	case 256:
100 		*key_sz_p = MLX5_GENERAL_OBJECT_TYPE_ENCRYPTION_KEY_KEY_SIZE_256;
101 		break;
102 	default:
103 		mlx5_core_err(mdev, "Crypto offload error, invalid key size (%u bits)\n",
104 			      sz_bits);
105 		return -EINVAL;
106 	}
107 
108 	return 0;
109 }
110 
111 static int mlx5_crypto_dek_fill_key(struct mlx5_core_dev *mdev, u8 *key_obj,
112 				    const void *key, u32 sz_bytes)
113 {
114 	void *dst;
115 	u8 key_sz;
116 	int err;
117 
118 	err = mlx5_crypto_dek_get_key_sz(mdev, sz_bytes, &key_sz);
119 	if (err)
120 		return err;
121 
122 	MLX5_SET(encryption_key_obj, key_obj, key_size, key_sz);
123 
124 	if (sz_bytes == 16)
125 		/* For a 128-bit key the MSBs of the key field are reserved. */
126 		dst = MLX5_ADDR_OF(encryption_key_obj, key_obj, key[1]);
127 	else
128 		dst = MLX5_ADDR_OF(encryption_key_obj, key_obj, key);
129 
130 	memcpy(dst, key, sz_bytes);
131 
132 	return 0;
133 }
134 
135 static int mlx5_crypto_cmd_sync_crypto(struct mlx5_core_dev *mdev,
136 				       int crypto_type)
137 {
138 	u32 in[MLX5_ST_SZ_DW(sync_crypto_in)] = {};
139 	int err;
140 
141 	mlx5_core_dbg(mdev,
142 		      "Execute SYNC_CRYPTO command with crypto_type(0x%x)\n",
143 		      crypto_type);
144 
145 	MLX5_SET(sync_crypto_in, in, opcode, MLX5_CMD_OP_SYNC_CRYPTO);
146 	MLX5_SET(sync_crypto_in, in, crypto_type, crypto_type);
147 
148 	err = mlx5_cmd_exec_in(mdev, sync_crypto, in);
149 	if (err)
150 		mlx5_core_err(mdev,
151 			      "Failed to exec sync crypto, type=%d, err=%d\n",
152 			      crypto_type, err);
153 
154 	return err;
155 }
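/* Note (summarizing the pool logic below): a successful SYNC_CRYPTO is the
 * point after which DEK slots that were freed by their users (need_sync set,
 * in_use cleared) may be handed out again; see
 * mlx5_crypto_dek_pool_reset_synced() and mlx5_crypto_dek_sync_work_fn().
 */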
156 
157 static int mlx5_crypto_create_dek_bulk(struct mlx5_core_dev *mdev,
158 				       u32 key_purpose, int log_obj_range,
159 				       u32 *obj_id)
160 {
161 	u32 in[MLX5_ST_SZ_DW(create_encryption_key_in)] = {};
162 	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
163 	void *obj, *param;
164 	int err;
165 
166 	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
167 		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
168 	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
169 		 MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY);
170 	param = MLX5_ADDR_OF(general_obj_in_cmd_hdr, in, op_param);
171 	MLX5_SET(general_obj_create_param, param, log_obj_range, log_obj_range);
172 
173 	obj = MLX5_ADDR_OF(create_encryption_key_in, in, encryption_key_object);
174 	MLX5_SET(encryption_key_obj, obj, key_purpose, key_purpose);
175 	MLX5_SET(encryption_key_obj, obj, pd, mdev->mlx5e_res.hw_objs.pdn);
176 
177 	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
178 	if (err)
179 		return err;
180 
181 	*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
182 	mlx5_core_dbg(mdev, "DEK objects created, bulk=%d, obj_id=%d\n",
183 		      1 << log_obj_range, *obj_id);
184 
185 	return 0;
186 }
187 
188 static int mlx5_crypto_modify_dek_key(struct mlx5_core_dev *mdev,
189 				      const void *key, u32 sz_bytes, u32 key_purpose,
190 				      u32 obj_id, u32 obj_offset)
191 {
192 	u32 in[MLX5_ST_SZ_DW(modify_encryption_key_in)] = {};
193 	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
194 	void *obj, *param;
195 	int err;
196 
197 	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
198 		 MLX5_CMD_OP_MODIFY_GENERAL_OBJECT);
199 	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
200 		 MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY);
201 	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);
202 
203 	param = MLX5_ADDR_OF(general_obj_in_cmd_hdr, in, op_param);
204 	MLX5_SET(general_obj_query_param, param, obj_offset, obj_offset);
205 
206 	obj = MLX5_ADDR_OF(modify_encryption_key_in, in, encryption_key_object);
207 	MLX5_SET64(encryption_key_obj, obj, modify_field_select, 1);
208 	MLX5_SET(encryption_key_obj, obj, key_purpose, key_purpose);
209 	MLX5_SET(encryption_key_obj, obj, pd, mdev->mlx5e_res.hw_objs.pdn);
210 
211 	err = mlx5_crypto_dek_fill_key(mdev, obj, key, sz_bytes);
212 	if (err)
213 		return err;
214 
215 	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
216 
217 	/* avoid leaking key on the stack */
218 	memzero_explicit(in, sizeof(in));
219 
220 	return err;
221 }
222 
223 static int mlx5_crypto_create_dek_key(struct mlx5_core_dev *mdev,
224 				      const void *key, u32 sz_bytes,
225 				      u32 key_purpose, u32 *p_key_id)
226 {
227 	u32 in[MLX5_ST_SZ_DW(create_encryption_key_in)] = {};
228 	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
229 	u64 general_obj_types;
230 	void *obj;
231 	int err;
232 
233 	general_obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);
234 	if (!(general_obj_types &
235 	      MLX5_HCA_CAP_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY))
236 		return -EINVAL;
237 
238 	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
239 		 MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
240 	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
241 		 MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY);
242 
243 	obj = MLX5_ADDR_OF(create_encryption_key_in, in, encryption_key_object);
244 	MLX5_SET(encryption_key_obj, obj, key_purpose, key_purpose);
245 	MLX5_SET(encryption_key_obj, obj, pd, mdev->mlx5e_res.hw_objs.pdn);
246 
247 	err = mlx5_crypto_dek_fill_key(mdev, obj, key, sz_bytes);
248 	if (err)
249 		return err;
250 
251 	err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
252 	if (!err)
253 		*p_key_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
254 
255 	/* avoid leaking key on the stack */
256 	memzero_explicit(in, sizeof(in));
257 
258 	return err;
259 }
260 
261 static void mlx5_crypto_destroy_dek_key(struct mlx5_core_dev *mdev, u32 key_id)
262 {
263 	u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
264 	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
265 
266 	MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
267 		 MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
268 	MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
269 		 MLX5_GENERAL_OBJECT_TYPES_ENCRYPTION_KEY);
270 	MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, key_id);
271 
272 	mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
273 }
274 
275 int mlx5_create_encryption_key(struct mlx5_core_dev *mdev,
276 			       const void *key, u32 sz_bytes,
277 			       u32 key_type, u32 *p_key_id)
278 {
279 	return mlx5_crypto_create_dek_key(mdev, key, sz_bytes, key_type, p_key_id);
280 }
281 
282 void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id)
283 {
284 	mlx5_crypto_destroy_dek_key(mdev, key_id);
285 }
286 
287 static struct mlx5_crypto_dek_bulk *
288 mlx5_crypto_dek_bulk_create(struct mlx5_crypto_dek_pool *pool)
289 {
290 	struct mlx5_crypto_dek_priv *dek_priv = pool->mdev->mlx5e_res.dek_priv;
291 	struct mlx5_core_dev *mdev = pool->mdev;
292 	struct mlx5_crypto_dek_bulk *bulk;
293 	int num_deks, base_obj_id;
294 	int err;
295 
296 	bulk = kzalloc(sizeof(*bulk), GFP_KERNEL);
297 	if (!bulk)
298 		return ERR_PTR(-ENOMEM);
299 
300 	num_deks = 1 << dek_priv->log_dek_obj_range;
301 	bulk->need_sync = bitmap_zalloc(num_deks, GFP_KERNEL);
302 	if (!bulk->need_sync) {
303 		err = -ENOMEM;
304 		goto err_out;
305 	}
306 
307 	bulk->in_use = bitmap_zalloc(num_deks, GFP_KERNEL);
308 	if (!bulk->in_use) {
309 		err = -ENOMEM;
310 		goto err_out;
311 	}
312 
313 	err = mlx5_crypto_create_dek_bulk(mdev, pool->key_purpose,
314 					  dek_priv->log_dek_obj_range,
315 					  &base_obj_id);
316 	if (err)
317 		goto err_out;
318 
319 	bulk->base_obj_id = base_obj_id;
320 	bulk->num_deks = num_deks;
321 	bulk->avail_deks = num_deks;
322 	bulk->mdev = mdev;
323 
324 	return bulk;
325 
326 err_out:
327 	bitmap_free(bulk->in_use);
328 	bitmap_free(bulk->need_sync);
329 	kfree(bulk);
330 	return ERR_PTR(err);
331 }
332 
333 static struct mlx5_crypto_dek_bulk *
334 mlx5_crypto_dek_pool_add_bulk(struct mlx5_crypto_dek_pool *pool)
335 {
336 	struct mlx5_crypto_dek_bulk *bulk;
337 
338 	bulk = mlx5_crypto_dek_bulk_create(pool);
339 	if (IS_ERR(bulk))
340 		return bulk;
341 
342 	pool->avail_deks += bulk->num_deks;
343 	pool->num_deks += bulk->num_deks;
344 	list_add(&bulk->entry, &pool->partial_list);
345 
346 	return bulk;
347 }
348 
349 static void mlx5_crypto_dek_bulk_free(struct mlx5_crypto_dek_bulk *bulk)
350 {
351 	mlx5_crypto_destroy_dek_key(bulk->mdev, bulk->base_obj_id);
352 	bitmap_free(bulk->need_sync);
353 	bitmap_free(bulk->in_use);
354 	kfree(bulk);
355 }
356 
357 static void mlx5_crypto_dek_pool_remove_bulk(struct mlx5_crypto_dek_pool *pool,
358 					     struct mlx5_crypto_dek_bulk *bulk,
359 					     bool delay)
360 {
361 	pool->num_deks -= bulk->num_deks;
362 	pool->avail_deks -= bulk->avail_deks;
363 	pool->in_use_deks -= bulk->in_use_deks;
364 	list_del(&bulk->entry);
365 	if (!delay)
366 		mlx5_crypto_dek_bulk_free(bulk);
367 }
368 
369 static struct mlx5_crypto_dek_bulk *
370 mlx5_crypto_dek_pool_pop(struct mlx5_crypto_dek_pool *pool, u32 *obj_offset)
371 {
372 	struct mlx5_crypto_dek_bulk *bulk;
373 	int pos;
374 
375 	mutex_lock(&pool->lock);
376 	bulk = list_first_entry_or_null(&pool->partial_list,
377 					struct mlx5_crypto_dek_bulk, entry);
378 
379 	if (bulk) {
380 		pos = find_next_zero_bit(bulk->need_sync, bulk->num_deks,
381 					 bulk->avail_start);
382 		if (pos == bulk->num_deks) {
383 			mlx5_core_err(pool->mdev, "Wrong DEK bulk avail_start.\n");
384 			pos = find_first_zero_bit(bulk->need_sync, bulk->num_deks);
385 		}
386 		WARN_ON(pos == bulk->num_deks);
387 	} else {
388 		bulk = list_first_entry_or_null(&pool->avail_list,
389 						struct mlx5_crypto_dek_bulk,
390 						entry);
391 		if (bulk) {
392 			list_move(&bulk->entry, &pool->partial_list);
393 		} else {
394 			bulk = mlx5_crypto_dek_pool_add_bulk(pool);
395 			if (IS_ERR(bulk))
396 				goto out;
397 		}
398 		pos = 0;
399 	}
400 
401 	*obj_offset = pos;
402 	bitmap_set(bulk->need_sync, pos, 1);
403 	bitmap_set(bulk->in_use, pos, 1);
404 	bulk->in_use_deks++;
405 	bulk->avail_deks--;
406 	if (!bulk->avail_deks) {
407 		list_move(&bulk->entry, &pool->full_list);
408 		bulk->avail_start = bulk->num_deks;
409 	} else {
410 		bulk->avail_start = pos + 1;
411 	}
412 	pool->avail_deks--;
413 	pool->in_use_deks++;
414 
415 out:
416 	mutex_unlock(&pool->lock);
417 	return bulk;
418 }
419 
420 static bool mlx5_crypto_dek_need_sync(struct mlx5_crypto_dek_pool *pool)
421 {
422 	return !pool->syncing &&
423 	       MLX5_CRYPTO_DEK_POOL_CALC_FREED(pool) > MLX5_CRYPTO_DEK_POOL_SYNC_THRESH;
424 }
425 
426 static int mlx5_crypto_dek_free_locked(struct mlx5_crypto_dek_pool *pool,
427 				       struct mlx5_crypto_dek *dek)
428 {
429 	struct mlx5_crypto_dek_bulk *bulk = dek->bulk;
430 	int obj_offset;
431 	bool old_val;
432 	int err = 0;
433 
434 	obj_offset = dek->obj_id - bulk->base_obj_id;
435 	old_val = test_and_clear_bit(obj_offset, bulk->in_use);
436 	WARN_ON_ONCE(!old_val);
437 	if (!old_val) {
438 		err = -ENOENT;
439 		goto out_free;
440 	}
441 	pool->in_use_deks--;
442 	bulk->in_use_deks--;
443 	if (!bulk->avail_deks && !bulk->in_use_deks)
444 		list_move(&bulk->entry, &pool->sync_list);
445 
446 	if (mlx5_crypto_dek_need_sync(pool) && schedule_work(&pool->sync_work))
447 		pool->syncing = true;
448 
449 out_free:
450 	kfree(dek);
451 	return err;
452 }
453 
454 static int mlx5_crypto_dek_pool_push(struct mlx5_crypto_dek_pool *pool,
455 				     struct mlx5_crypto_dek *dek)
456 {
457 	int err = 0;
458 
459 	mutex_lock(&pool->lock);
460 	if (pool->syncing)
461 		list_add(&dek->entry, &pool->wait_for_free);
462 	else
463 		err = mlx5_crypto_dek_free_locked(pool, dek);
464 	mutex_unlock(&pool->lock);
465 
466 	return err;
467 }
468 
469 /* Update the bits for a bulk during sync, and avail_start for later searches.
470  * The possible (need_sync, in_use) combinations for one DEK are
471  *    - (0,0): the key is ready for use,
472  *    - (1,1): the key is currently being used by a user,
473  *    - (1,0): the key is freed and waiting to be synced,
474  *    - (0,1): an invalid state.
475  * So the number of revalidated DEKs can be calculated by
476  * hweight_long(need_sync XOR in_use), and the need_sync bits can be reset
477  * by simply copying from the in_use bits.
478  */
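/* Worked example (hypothetical bit values): if one long of need_sync holds
 * 0b1110 and the corresponding long of in_use holds 0b0110, then
 * need_sync ^ in_use == 0b1000, so hweight_long() counts one DEK that was
 * freed and is now revalidated. Copying in_use over need_sync (-> 0b0110)
 * marks that DEK as available again while leaving the two in-use DEKs set.
 */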
479 static void mlx5_crypto_dek_bulk_reset_synced(struct mlx5_crypto_dek_pool *pool,
480 					      struct mlx5_crypto_dek_bulk *bulk)
481 {
482 	unsigned long *need_sync = bulk->need_sync;
483 	unsigned long *in_use = bulk->in_use;
484 	int i, freed, reused, avail_next;
485 	bool first = true;
486 
487 	freed = MLX5_CRYPTO_DEK_BULK_CALC_FREED(bulk);
488 
489 	for (i = 0; freed && i < BITS_TO_LONGS(bulk->num_deks);
490 			i++, need_sync++, in_use++) {
491 		reused = hweight_long((*need_sync) ^ (*in_use));
492 		if (!reused)
493 			continue;
494 
495 		bulk->avail_deks += reused;
496 		pool->avail_deks += reused;
497 		*need_sync = *in_use;
498 		if (first) {
499 			avail_next = i * BITS_PER_TYPE(long);
500 			if (bulk->avail_start > avail_next)
501 				bulk->avail_start = avail_next;
502 			first = false;
503 		}
504 
505 		freed -= reused;
506 	}
507 }
508 
509 /* Return true if the bulk is reused, false if destroyed with delay */
510 static bool mlx5_crypto_dek_bulk_handle_avail(struct mlx5_crypto_dek_pool *pool,
511 					      struct mlx5_crypto_dek_bulk *bulk,
512 					      struct list_head *destroy_list)
513 {
514 	if (list_empty(&pool->avail_list)) {
515 		list_move(&bulk->entry, &pool->avail_list);
516 		return true;
517 	}
518 
519 	mlx5_crypto_dek_pool_remove_bulk(pool, bulk, true);
520 	list_add(&bulk->entry, destroy_list);
521 	return false;
522 }
523 
524 static void mlx5_crypto_dek_pool_splice_destroy_list(struct mlx5_crypto_dek_pool *pool,
525 						     struct list_head *list,
526 						     struct list_head *head)
527 {
528 	spin_lock(&pool->destroy_lock);
529 	list_splice_init(list, head);
530 	spin_unlock(&pool->destroy_lock);
531 }
532 
533 static void mlx5_crypto_dek_pool_free_wait_keys(struct mlx5_crypto_dek_pool *pool)
534 {
535 	struct mlx5_crypto_dek *dek, *next;
536 
537 	list_for_each_entry_safe(dek, next, &pool->wait_for_free, entry) {
538 		list_del(&dek->entry);
539 		mlx5_crypto_dek_free_locked(pool, dek);
540 	}
541 }
542 
543 /* For all the bulks in each list, reset the bits after a successful sync.
544  * Move them to different lists according to the number of available DEKs.
545  * Destroy all the idle bulks, except one kept for quick service.
546  * And free the DEKs in the waiting list at the end of this function.
547  */
548 static void mlx5_crypto_dek_pool_reset_synced(struct mlx5_crypto_dek_pool *pool)
549 {
550 	struct mlx5_crypto_dek_bulk *bulk, *tmp;
551 	LIST_HEAD(destroy_list);
552 
553 	list_for_each_entry_safe(bulk, tmp, &pool->partial_list, entry) {
554 		mlx5_crypto_dek_bulk_reset_synced(pool, bulk);
555 		if (MLX5_CRYPTO_DEK_BULK_IDLE(bulk))
556 			mlx5_crypto_dek_bulk_handle_avail(pool, bulk, &destroy_list);
557 	}
558 
559 	list_for_each_entry_safe(bulk, tmp, &pool->full_list, entry) {
560 		mlx5_crypto_dek_bulk_reset_synced(pool, bulk);
561 
562 		if (!bulk->avail_deks)
563 			continue;
564 
565 		if (MLX5_CRYPTO_DEK_BULK_IDLE(bulk))
566 			mlx5_crypto_dek_bulk_handle_avail(pool, bulk, &destroy_list);
567 		else
568 			list_move(&bulk->entry, &pool->partial_list);
569 	}
570 
571 	list_for_each_entry_safe(bulk, tmp, &pool->sync_list, entry) {
572 		bulk->avail_deks = bulk->num_deks;
573 		pool->avail_deks += bulk->num_deks;
574 		if (mlx5_crypto_dek_bulk_handle_avail(pool, bulk, &destroy_list)) {
575 			memset(bulk->need_sync, 0, BITS_TO_BYTES(bulk->num_deks));
576 			bulk->avail_start = 0;
577 		}
578 	}
579 
580 	mlx5_crypto_dek_pool_free_wait_keys(pool);
581 
582 	if (!list_empty(&destroy_list)) {
583 		mlx5_crypto_dek_pool_splice_destroy_list(pool, &destroy_list,
584 							 &pool->destroy_list);
585 		schedule_work(&pool->destroy_work);
586 	}
587 }
588 
589 static void mlx5_crypto_dek_sync_work_fn(struct work_struct *work)
590 {
591 	struct mlx5_crypto_dek_pool *pool =
592 		container_of(work, struct mlx5_crypto_dek_pool, sync_work);
593 	int err;
594 
595 	err = mlx5_crypto_cmd_sync_crypto(pool->mdev, BIT(pool->key_purpose));
596 	mutex_lock(&pool->lock);
597 	if (!err)
598 		mlx5_crypto_dek_pool_reset_synced(pool);
599 	pool->syncing = false;
600 	mutex_unlock(&pool->lock);
601 }
602 
603 struct mlx5_crypto_dek *mlx5_crypto_dek_create(struct mlx5_crypto_dek_pool *dek_pool,
604 					       const void *key, u32 sz_bytes)
605 {
606 	struct mlx5_crypto_dek_priv *dek_priv = dek_pool->mdev->mlx5e_res.dek_priv;
607 	struct mlx5_core_dev *mdev = dek_pool->mdev;
608 	u32 key_purpose = dek_pool->key_purpose;
609 	struct mlx5_crypto_dek_bulk *bulk;
610 	struct mlx5_crypto_dek *dek;
611 	int obj_offset;
612 	int err;
613 
614 	dek = kzalloc(sizeof(*dek), GFP_KERNEL);
615 	if (!dek)
616 		return ERR_PTR(-ENOMEM);
617 
618 	if (!dek_priv) {
619 		err = mlx5_crypto_create_dek_key(mdev, key, sz_bytes,
620 						 key_purpose, &dek->obj_id);
621 		goto out;
622 	}
623 
624 	bulk = mlx5_crypto_dek_pool_pop(dek_pool, &obj_offset);
625 	if (IS_ERR(bulk)) {
626 		err = PTR_ERR(bulk);
627 		goto out;
628 	}
629 
630 	dek->bulk = bulk;
631 	dek->obj_id = bulk->base_obj_id + obj_offset;
632 	err = mlx5_crypto_modify_dek_key(mdev, key, sz_bytes, key_purpose,
633 					 bulk->base_obj_id, obj_offset);
634 	if (err) {
635 		mlx5_crypto_dek_pool_push(dek_pool, dek);
636 		return ERR_PTR(err);
637 	}
638 
639 out:
640 	if (err) {
641 		kfree(dek);
642 		return ERR_PTR(err);
643 	}
644 
645 	return dek;
646 }
647 
648 void mlx5_crypto_dek_destroy(struct mlx5_crypto_dek_pool *dek_pool,
649 			     struct mlx5_crypto_dek *dek)
650 {
651 	struct mlx5_crypto_dek_priv *dek_priv = dek_pool->mdev->mlx5e_res.dek_priv;
652 	struct mlx5_core_dev *mdev = dek_pool->mdev;
653 
654 	if (!dek_priv) {
655 		mlx5_crypto_destroy_dek_key(mdev, dek->obj_id);
656 		kfree(dek);
657 	} else {
658 		mlx5_crypto_dek_pool_push(dek_pool, dek);
659 	}
660 }
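/* Usage sketch (illustrative only, not part of this file): a consumer such as
 * the TLS offload code is expected to pair the calls above roughly as follows,
 * where "purpose" is the key purpose the pool was created for and "key"/"sz"
 * come from the caller:
 *
 *	pool = mlx5_crypto_dek_pool_create(mdev, purpose);
 *	...
 *	dek = mlx5_crypto_dek_create(pool, key, sz);
 *	if (IS_ERR(dek))
 *		return PTR_ERR(dek);
 *	dek_id = mlx5_crypto_dek_get_id(dek);	 (programmed into the HW context)
 *	...
 *	mlx5_crypto_dek_destroy(pool, dek);	 (when the key is retired)
 *	...
 *	mlx5_crypto_dek_pool_destroy(pool);	 (at teardown)
 */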
661 
662 static void mlx5_crypto_dek_free_destroy_list(struct list_head *destroy_list)
663 {
664 	struct mlx5_crypto_dek_bulk *bulk, *tmp;
665 
666 	list_for_each_entry_safe(bulk, tmp, destroy_list, entry)
667 		mlx5_crypto_dek_bulk_free(bulk);
668 }
669 
670 static void mlx5_crypto_dek_destroy_work_fn(struct work_struct *work)
671 {
672 	struct mlx5_crypto_dek_pool *pool =
673 		container_of(work, struct mlx5_crypto_dek_pool, destroy_work);
674 	LIST_HEAD(destroy_list);
675 
676 	mlx5_crypto_dek_pool_splice_destroy_list(pool, &pool->destroy_list,
677 						 &destroy_list);
678 	mlx5_crypto_dek_free_destroy_list(&destroy_list);
679 }
680 
681 struct mlx5_crypto_dek_pool *
682 mlx5_crypto_dek_pool_create(struct mlx5_core_dev *mdev, int key_purpose)
683 {
684 	struct mlx5_crypto_dek_pool *pool;
685 
686 	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
687 	if (!pool)
688 		return ERR_PTR(-ENOMEM);
689 
690 	pool->mdev = mdev;
691 	pool->key_purpose = key_purpose;
692 
693 	mutex_init(&pool->lock);
694 	INIT_LIST_HEAD(&pool->avail_list);
695 	INIT_LIST_HEAD(&pool->partial_list);
696 	INIT_LIST_HEAD(&pool->full_list);
697 	INIT_LIST_HEAD(&pool->sync_list);
698 	INIT_LIST_HEAD(&pool->wait_for_free);
699 	INIT_WORK(&pool->sync_work, mlx5_crypto_dek_sync_work_fn);
700 	spin_lock_init(&pool->destroy_lock);
701 	INIT_LIST_HEAD(&pool->destroy_list);
702 	INIT_WORK(&pool->destroy_work, mlx5_crypto_dek_destroy_work_fn);
703 
704 	return pool;
705 }
706 
707 void mlx5_crypto_dek_pool_destroy(struct mlx5_crypto_dek_pool *pool)
708 {
709 	struct mlx5_crypto_dek_bulk *bulk, *tmp;
710 
711 	cancel_work_sync(&pool->sync_work);
712 	cancel_work_sync(&pool->destroy_work);
713 
714 	mlx5_crypto_dek_pool_free_wait_keys(pool);
715 
716 	list_for_each_entry_safe(bulk, tmp, &pool->avail_list, entry)
717 		mlx5_crypto_dek_pool_remove_bulk(pool, bulk, false);
718 
719 	list_for_each_entry_safe(bulk, tmp, &pool->full_list, entry)
720 		mlx5_crypto_dek_pool_remove_bulk(pool, bulk, false);
721 
722 	list_for_each_entry_safe(bulk, tmp, &pool->sync_list, entry)
723 		mlx5_crypto_dek_pool_remove_bulk(pool, bulk, false);
724 
725 	list_for_each_entry_safe(bulk, tmp, &pool->partial_list, entry)
726 		mlx5_crypto_dek_pool_remove_bulk(pool, bulk, false);
727 
728 	mlx5_crypto_dek_free_destroy_list(&pool->destroy_list);
729 
730 	mutex_destroy(&pool->lock);
731 
732 	kfree(pool);
733 }
734 
735 void mlx5_crypto_dek_cleanup(struct mlx5_crypto_dek_priv *dek_priv)
736 {
737 	if (!dek_priv)
738 		return;
739 
740 	kfree(dek_priv);
741 }
742 
743 struct mlx5_crypto_dek_priv *mlx5_crypto_dek_init(struct mlx5_core_dev *mdev)
744 {
745 	struct mlx5_crypto_dek_priv *dek_priv;
746 	int err;
747 
748 	if (!MLX5_CAP_CRYPTO(mdev, log_dek_max_alloc))
749 		return NULL;
750 
751 	dek_priv = kzalloc(sizeof(*dek_priv), GFP_KERNEL);
752 	if (!dek_priv)
753 		return ERR_PTR(-ENOMEM);
754 
755 	dek_priv->mdev = mdev;
756 	dek_priv->log_dek_obj_range = min_t(int, 12,
757 					    MLX5_CAP_CRYPTO(mdev, log_dek_max_alloc));
758 
759 	/* sync all types of objects */
760 	err = mlx5_crypto_cmd_sync_crypto(mdev, MLX5_CRYPTO_DEK_ALL_TYPE);
761 	if (err)
762 		goto err_sync_crypto;
763 
764 	mlx5_core_dbg(mdev, "Crypto DEK enabled, %d deks per alloc (max %d), total %d\n",
765 		      1 << dek_priv->log_dek_obj_range,
766 		      1 << MLX5_CAP_CRYPTO(mdev, log_dek_max_alloc),
767 		      1 << MLX5_CAP_CRYPTO(mdev, log_max_num_deks));
768 
769 	return dek_priv;
770 
771 err_sync_crypto:
772 	kfree(dek_priv);
773 	return ERR_PTR(err);
774 }
775