// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   Copyright (C) 2019 Samsung Electronics Co., Ltd.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>

#include "glob.h"
#include "crypto_ctx.h"
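
/*
 * Pool of ksmbd_crypto_ctx objects. Idle contexts sit on idle_ctx; the
 * pool grows on demand up to roughly one context per online CPU
 * (tracked in avail_ctx), beyond which callers sleep on ctx_wait until
 * a context is released.
 *
 * Illustrative caller pattern (a sketch; real callers live outside
 * this file):
 *
 *	struct ksmbd_crypto_ctx *ctx = ksmbd_crypto_ctx_find_hmacsha256();
 *
 *	if (!ctx)
 *		return -ENOMEM;
 *	... use ctx->desc[CRYPTO_SHASH_HMACSHA256] with the crypto_shash
 *	    API, then hand the context back ...
 *	ksmbd_release_crypto_ctx(ctx);
 */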
struct crypto_ctx_list {
	spinlock_t		ctx_lock;
	int			avail_ctx;
	struct list_head	idle_ctx;
	wait_queue_head_t	ctx_wait;
};

static struct crypto_ctx_list ctx_list;
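
/* Release an AEAD transform; NULL is tolerated. */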
static inline void free_aead(struct crypto_aead *aead)
{
	if (aead)
		crypto_free_aead(aead);
}
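
/* Release a shash descriptor together with its underlying transform. */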
static void free_shash(struct shash_desc *shash)
{
	if (shash) {
		crypto_free_shash(shash->tfm);
		kfree(shash);
	}
}
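
/*
 * Allocate an AEAD transform for the given algorithm id. Only AES-GCM
 * and AES-CCM are supported; any other id, or an allocation failure,
 * yields NULL.
 */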
static struct crypto_aead *alloc_aead(int id)
{
	struct crypto_aead *tfm = NULL;

	switch (id) {
	case CRYPTO_AEAD_AES_GCM:
		tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
		break;
	case CRYPTO_AEAD_AES_CCM:
		tfm = crypto_alloc_aead("ccm(aes)", 0, 0);
		break;
	default:
		pr_err("Unsupported aead algorithm (id : %d)\n", id);
		return NULL;
	}

	if (IS_ERR(tfm)) {
		pr_err("Failed to alloc encrypt aead : %ld\n", PTR_ERR(tfm));
		return NULL;
	}

	return tfm;
}
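
/*
 * Allocate a shash transform for the given algorithm id plus the
 * shash_desc that drives it, sized via crypto_shash_descsize(), in one
 * allocation. Returns NULL on an unknown id or allocation failure.
 */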
static struct shash_desc *alloc_shash_desc(int id)
{
	struct crypto_shash *tfm = NULL;
	struct shash_desc *shash;

	switch (id) {
	case CRYPTO_SHASH_HMACMD5:
		tfm = crypto_alloc_shash("hmac(md5)", 0, 0);
		break;
	case CRYPTO_SHASH_HMACSHA256:
		tfm = crypto_alloc_shash("hmac(sha256)", 0, 0);
		break;
	case CRYPTO_SHASH_CMACAES:
		tfm = crypto_alloc_shash("cmac(aes)", 0, 0);
		break;
	case CRYPTO_SHASH_SHA512:
		tfm = crypto_alloc_shash("sha512", 0, 0);
		break;
	default:
		return NULL;
	}

	if (IS_ERR(tfm))
		return NULL;

	shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm),
			KSMBD_DEFAULT_GFP);
	if (!shash)
		crypto_free_shash(tfm);
	else
		shash->tfm = tfm;
	return shash;
}
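
/* Free every transform held by a context, then the context itself. */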
static void ctx_free(struct ksmbd_crypto_ctx *ctx)
{
	int i;

	for (i = 0; i < CRYPTO_SHASH_MAX; i++)
		free_shash(ctx->desc[i]);
	for (i = 0; i < CRYPTO_AEAD_MAX; i++)
		free_aead(ctx->ccmaes[i]);
	kfree(ctx);
}
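
/*
 * Grab a crypto context: reuse an idle one if available, otherwise
 * allocate a new one while the pool is still below num_online_cpus().
 * When the pool is at its limit, or the allocation fails, sleep until
 * another user releases a context, then retry.
 */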
static struct ksmbd_crypto_ctx *ksmbd_find_crypto_ctx(void)
{
	struct ksmbd_crypto_ctx *ctx;

	while (1) {
		spin_lock(&ctx_list.ctx_lock);
		if (!list_empty(&ctx_list.idle_ctx)) {
			ctx = list_entry(ctx_list.idle_ctx.next,
					 struct ksmbd_crypto_ctx,
					 list);
			list_del(&ctx->list);
			spin_unlock(&ctx_list.ctx_lock);
			return ctx;
		}

		if (ctx_list.avail_ctx > num_online_cpus()) {
			spin_unlock(&ctx_list.ctx_lock);
			wait_event(ctx_list.ctx_wait,
				   !list_empty(&ctx_list.idle_ctx));
			continue;
		}

		ctx_list.avail_ctx++;
		spin_unlock(&ctx_list.ctx_lock);

		ctx = kzalloc(sizeof(struct ksmbd_crypto_ctx), KSMBD_DEFAULT_GFP);
		if (!ctx) {
			spin_lock(&ctx_list.ctx_lock);
			ctx_list.avail_ctx--;
			spin_unlock(&ctx_list.ctx_lock);
			wait_event(ctx_list.ctx_wait,
				   !list_empty(&ctx_list.idle_ctx));
			continue;
		}
		break;
	}
	return ctx;
}
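
/*
 * Return a context to the pool. While the pool is within its soft
 * limit the context goes back on the idle list and a waiter is woken;
 * otherwise it is freed and the pool shrinks.
 */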
void ksmbd_release_crypto_ctx(struct ksmbd_crypto_ctx *ctx)
{
	if (!ctx)
		return;

	spin_lock(&ctx_list.ctx_lock);
	if (ctx_list.avail_ctx <= num_online_cpus()) {
		list_add(&ctx->list, &ctx_list.idle_ctx);
		spin_unlock(&ctx_list.ctx_lock);
		wake_up(&ctx_list.ctx_wait);
		return;
	}

	ctx_list.avail_ctx--;
	spin_unlock(&ctx_list.ctx_lock);
	ctx_free(ctx);
}
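
/*
 * Find a context and make sure it carries a shash descriptor for the
 * requested algorithm, allocating one lazily on first use.
 */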
static struct ksmbd_crypto_ctx *____crypto_shash_ctx_find(int id)
{
	struct ksmbd_crypto_ctx *ctx;

	if (id >= CRYPTO_SHASH_MAX)
		return NULL;

	ctx = ksmbd_find_crypto_ctx();
	if (ctx->desc[id])
		return ctx;

	ctx->desc[id] = alloc_shash_desc(id);
	if (ctx->desc[id])
		return ctx;
	ksmbd_release_crypto_ctx(ctx);
	return NULL;
}
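
/* Per-algorithm wrappers around ____crypto_shash_ctx_find(). */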
struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_hmacmd5(void)
{
	return ____crypto_shash_ctx_find(CRYPTO_SHASH_HMACMD5);
}

struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_hmacsha256(void)
{
	return ____crypto_shash_ctx_find(CRYPTO_SHASH_HMACSHA256);
}

struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_cmacaes(void)
{
	return ____crypto_shash_ctx_find(CRYPTO_SHASH_CMACAES);
}

struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha512(void)
{
	return ____crypto_shash_ctx_find(CRYPTO_SHASH_SHA512);
}
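
/*
 * As above, but for the AEAD (AES-GCM/AES-CCM) transforms used for
 * encryption.
 */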
static struct ksmbd_crypto_ctx *____crypto_aead_ctx_find(int id)
{
	struct ksmbd_crypto_ctx *ctx;

	if (id >= CRYPTO_AEAD_MAX)
		return NULL;

	ctx = ksmbd_find_crypto_ctx();
	if (ctx->ccmaes[id])
		return ctx;

	ctx->ccmaes[id] = alloc_aead(id);
	if (ctx->ccmaes[id])
		return ctx;
	ksmbd_release_crypto_ctx(ctx);
	return NULL;
}
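
/* Per-algorithm wrappers around ____crypto_aead_ctx_find(). */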
struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_gcm(void)
{
	return ____crypto_aead_ctx_find(CRYPTO_AEAD_AES_GCM);
}

struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_ccm(void)
{
	return ____crypto_aead_ctx_find(CRYPTO_AEAD_AES_CCM);
}
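
/*
 * Drain and free every idle context. No locking is taken here; this is
 * presumably only called once no other users of the pool remain (e.g.
 * at module teardown).
 */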
void ksmbd_crypto_destroy(void)
{
	struct ksmbd_crypto_ctx *ctx;

	while (!list_empty(&ctx_list.idle_ctx)) {
		ctx = list_entry(ctx_list.idle_ctx.next,
				 struct ksmbd_crypto_ctx,
				 list);
		list_del(&ctx->list);
		ctx_free(ctx);
	}
}
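
/*
 * Set up the pool and seed it with one pre-allocated idle context so
 * the first user does not have to wait.
 */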
int ksmbd_crypto_create(void)
{
	struct ksmbd_crypto_ctx *ctx;

	spin_lock_init(&ctx_list.ctx_lock);
	INIT_LIST_HEAD(&ctx_list.idle_ctx);
	init_waitqueue_head(&ctx_list.ctx_wait);
	ctx_list.avail_ctx = 1;

	ctx = kzalloc(sizeof(struct ksmbd_crypto_ctx), KSMBD_DEFAULT_GFP);
	if (!ctx)
		return -ENOMEM;
	list_add(&ctx->list, &ctx_list.idle_ctx);
	return 0;
}