// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2020 Hannes Reinecke, SUSE Linux
 */

#include <linux/crc32.h>
#include <linux/base64.h>
#include <linux/prandom.h>
#include <linux/unaligned.h>
#include <crypto/hash.h>
#include <crypto/dh.h>
#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-auth.h>
#include <linux/nvme-keyring.h>

#define CHAP_BUF_SIZE 4096
static struct kmem_cache *nvme_chap_buf_cache;
static mempool_t *nvme_chap_buf_pool;

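/*
 * Per-queue DH-HMAC-CHAP negotiation state. One context is allocated
 * per queue (admin plus all I/O queues) and reused across
 * authentication runs; nvme_auth_reset_dhchap() wipes all key material
 * between runs. c1/c2 hold the controller and host challenges, s1/s2
 * the corresponding sequence numbers, and the 64-byte arrays are sized
 * for the largest supported hash (SHA-512).
 */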
struct nvme_dhchap_queue_context {
	struct list_head entry;
	struct work_struct auth_work;
	struct nvme_ctrl *ctrl;
	struct crypto_shash *shash_tfm;
	struct crypto_kpp *dh_tfm;
	struct nvme_dhchap_key *transformed_key;
	void *buf;
	int qid;
	int error;
	u32 s1;
	u32 s2;
	bool bi_directional;
	u16 transaction;
	u8 status;
	u8 dhgroup_id;
	u8 hash_id;
	size_t hash_len;
	u8 c1[64];
	u8 c2[64];
	u8 response[64];
	u8 *ctrl_key;
	u8 *host_key;
	u8 *sess_key;
	int ctrl_key_len;
	int host_key_len;
	int sess_key_len;
};

static struct workqueue_struct *nvme_auth_wq;

static inline int ctrl_max_dhchaps(struct nvme_ctrl *ctrl)
{
	return ctrl->opts->nr_io_queues + ctrl->opts->nr_write_queues +
			ctrl->opts->nr_poll_queues + 1;
}

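/*
 * Authentication messages travel as NVMe fabrics commands: AUTH_SEND
 * carries a payload to the controller, AUTH_RECEIVE fetches its reply.
 * Admin-queue (qid 0) exchanges use the fabrics queue; I/O-queue
 * exchanges are submitted with reserved tags on the connect queue so
 * they cannot block on tag allocation.
 */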
static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
			    void *data, size_t data_len, bool auth_send)
{
	struct nvme_command cmd = {};
	nvme_submit_flags_t flags = NVME_SUBMIT_RETRY;
	struct request_queue *q = ctrl->fabrics_q;
	int ret;

	if (qid != 0) {
		flags |= NVME_SUBMIT_NOWAIT | NVME_SUBMIT_RESERVED;
		q = ctrl->connect_q;
	}

	cmd.auth_common.opcode = nvme_fabrics_command;
	cmd.auth_common.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER;
	cmd.auth_common.spsp0 = 0x01;
	cmd.auth_common.spsp1 = 0x01;
	if (auth_send) {
		cmd.auth_send.fctype = nvme_fabrics_type_auth_send;
		cmd.auth_send.tl = cpu_to_le32(data_len);
	} else {
		cmd.auth_receive.fctype = nvme_fabrics_type_auth_receive;
		cmd.auth_receive.al = cpu_to_le32(data_len);
	}

	ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, data_len,
				     qid == 0 ? NVME_QID_ANY : qid, flags);
	if (ret > 0)
		dev_warn(ctrl->device,
			"qid %d auth_send failed with status %d\n", qid, ret);
	else if (ret < 0)
		dev_err(ctrl->device,
			"qid %d auth_send failed with error %d\n", qid, ret);
	return ret;
}

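/*
 * Validate a just-received message: a FAILURE1 message is mapped to
 * its reason code, and an unexpected message type or a mismatched
 * transaction ID is rejected as INCORRECT_MESSAGE.
 */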
static int nvme_auth_receive_validate(struct nvme_ctrl *ctrl, int qid,
		struct nvmf_auth_dhchap_failure_data *data,
		u16 transaction, u8 expected_msg)
{
	dev_dbg(ctrl->device, "%s: qid %d auth_type %d auth_id %x\n",
		__func__, qid, data->auth_type, data->auth_id);

	if (data->auth_type == NVME_AUTH_COMMON_MESSAGES &&
	    data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
		return data->rescode_exp;
	}
	if (data->auth_type != NVME_AUTH_DHCHAP_MESSAGES ||
	    data->auth_id != expected_msg) {
		dev_warn(ctrl->device,
			 "qid %d invalid message %02x/%02x\n",
			 qid, data->auth_type, data->auth_id);
		return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
	}
	if (le16_to_cpu(data->t_id) != transaction) {
		dev_warn(ctrl->device,
			 "qid %d invalid transaction ID %d\n",
			 qid, le16_to_cpu(data->t_id));
		return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
	}
	return 0;
}

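/*
 * DH-HMAC-CHAP Step 1 payload: a single protocol descriptor (napd = 1)
 * advertising three hashes and six DH groups. The DH group identifiers
 * are written at a fixed offset of 30 into idlist[], after the space
 * reserved for up to 30 hash identifiers.
 */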
static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_negotiate_data *data = chap->buf;
	size_t size = sizeof(*data) + sizeof(union nvmf_auth_protocol);

	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}
	memset((u8 *)chap->buf, 0, size);
	data->auth_type = NVME_AUTH_COMMON_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
	data->t_id = cpu_to_le16(chap->transaction);
	if (ctrl->opts->concat && chap->qid == 0) {
		if (ctrl->opts->tls_key)
			data->sc_c = NVME_AUTH_SECP_REPLACETLSPSK;
		else
			data->sc_c = NVME_AUTH_SECP_NEWTLSPSK;
	} else
		data->sc_c = NVME_AUTH_SECP_NOSC;
	data->napd = 1;
	data->auth_protocol[0].dhchap.authid = NVME_AUTH_DHCHAP_AUTH_ID;
	data->auth_protocol[0].dhchap.halen = 3;
	data->auth_protocol[0].dhchap.dhlen = 6;
	data->auth_protocol[0].dhchap.idlist[0] = NVME_AUTH_HASH_SHA256;
	data->auth_protocol[0].dhchap.idlist[1] = NVME_AUTH_HASH_SHA384;
	data->auth_protocol[0].dhchap.idlist[2] = NVME_AUTH_HASH_SHA512;
	data->auth_protocol[0].dhchap.idlist[30] = NVME_AUTH_DHGROUP_NULL;
	data->auth_protocol[0].dhchap.idlist[31] = NVME_AUTH_DHGROUP_2048;
	data->auth_protocol[0].dhchap.idlist[32] = NVME_AUTH_DHGROUP_3072;
	data->auth_protocol[0].dhchap.idlist[33] = NVME_AUTH_DHGROUP_4096;
	data->auth_protocol[0].dhchap.idlist[34] = NVME_AUTH_DHGROUP_6144;
	data->auth_protocol[0].dhchap.idlist[35] = NVME_AUTH_DHGROUP_8192;

	return size;
}

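/*
 * DH-HMAC-CHAP Step 2: parse the controller's challenge. The hash and
 * DH transforms are cached in the context and reused when a retry
 * selects the same algorithms; otherwise they are freed and
 * reallocated for the newly selected hash and DH group.
 */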
static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_challenge_data *data = chap->buf;
	u16 dhvlen = le16_to_cpu(data->dhvlen);
	size_t size = sizeof(*data) + data->hl + dhvlen;
	const char *gid_name = nvme_auth_dhgroup_name(data->dhgid);
	const char *hmac_name, *kpp_name;

	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}

	hmac_name = nvme_auth_hmac_name(data->hashid);
	if (!hmac_name) {
		dev_warn(ctrl->device,
			 "qid %d: invalid HASH ID %d\n",
			 chap->qid, data->hashid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		return -EPROTO;
	}

	if (chap->hash_id == data->hashid && chap->shash_tfm &&
	    !strcmp(crypto_shash_alg_name(chap->shash_tfm), hmac_name) &&
	    crypto_shash_digestsize(chap->shash_tfm) == data->hl) {
		dev_dbg(ctrl->device,
			"qid %d: reuse existing hash %s\n",
			chap->qid, hmac_name);
		goto select_kpp;
	}

	/* Reset if hash cannot be reused */
	if (chap->shash_tfm) {
		crypto_free_shash(chap->shash_tfm);
		chap->hash_id = 0;
		chap->hash_len = 0;
	}
	chap->shash_tfm = crypto_alloc_shash(hmac_name, 0,
					     CRYPTO_ALG_ALLOCATES_MEMORY);
	if (IS_ERR(chap->shash_tfm)) {
		dev_warn(ctrl->device,
			 "qid %d: failed to allocate hash %s, error %ld\n",
			 chap->qid, hmac_name, PTR_ERR(chap->shash_tfm));
		chap->shash_tfm = NULL;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return -ENOMEM;
	}

	if (crypto_shash_digestsize(chap->shash_tfm) != data->hl) {
		dev_warn(ctrl->device,
			 "qid %d: invalid hash length %d\n",
			 chap->qid, data->hl);
		crypto_free_shash(chap->shash_tfm);
		chap->shash_tfm = NULL;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		return -EPROTO;
	}

	chap->hash_id = data->hashid;
	chap->hash_len = data->hl;
	dev_dbg(ctrl->device, "qid %d: selected hash %s\n",
		chap->qid, hmac_name);

select_kpp:
	kpp_name = nvme_auth_dhgroup_kpp(data->dhgid);
	if (!kpp_name) {
		dev_warn(ctrl->device,
			 "qid %d: invalid DH group id %d\n",
			 chap->qid, data->dhgid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
		/* Leave previous dh_tfm intact */
		return -EPROTO;
	}

	if (chap->dhgroup_id == data->dhgid &&
	    (data->dhgid == NVME_AUTH_DHGROUP_NULL || chap->dh_tfm)) {
		dev_dbg(ctrl->device,
			"qid %d: reuse existing DH group %s\n",
			chap->qid, gid_name);
		goto skip_kpp;
	}

	/* Reset dh_tfm if it can't be reused */
	if (chap->dh_tfm) {
		crypto_free_kpp(chap->dh_tfm);
		chap->dh_tfm = NULL;
	}

	if (data->dhgid != NVME_AUTH_DHGROUP_NULL) {
		if (dhvlen == 0) {
			dev_warn(ctrl->device,
				 "qid %d: empty DH value\n",
				 chap->qid);
			chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
			return -EPROTO;
		}

		chap->dh_tfm = crypto_alloc_kpp(kpp_name, 0, 0);
		if (IS_ERR(chap->dh_tfm)) {
			int ret = PTR_ERR(chap->dh_tfm);

			dev_warn(ctrl->device,
				 "qid %d: error %d initializing DH group %s\n",
				 chap->qid, ret, gid_name);
			chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
			chap->dh_tfm = NULL;
			return ret;
		}
		dev_dbg(ctrl->device, "qid %d: selected DH group %s\n",
			chap->qid, gid_name);
	} else if (dhvlen != 0) {
		dev_warn(ctrl->device,
			 "qid %d: invalid DH value for NULL DH\n",
			 chap->qid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EPROTO;
	}
	chap->dhgroup_id = data->dhgid;

skip_kpp:
	chap->s1 = le32_to_cpu(data->seqnum);
	memcpy(chap->c1, data->cval, chap->hash_len);
	if (dhvlen) {
		chap->ctrl_key = kmalloc(dhvlen, GFP_KERNEL);
		if (!chap->ctrl_key) {
			chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
			return -ENOMEM;
		}
		chap->ctrl_key_len = dhvlen;
		memcpy(chap->ctrl_key, data->cval + chap->hash_len,
		       dhvlen);
		dev_dbg(ctrl->device, "ctrl public key %*ph\n",
			 (int)chap->ctrl_key_len, chap->ctrl_key);
	}

	return 0;
}

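/*
 * DH-HMAC-CHAP Step 3 payload: the host response, an optional host
 * challenge c2 (sent whenever the controller must authenticate itself
 * or secure channel concatenation is requested), and the host's DH
 * public value. For secure channel concatenation the host sequence
 * number s2 is sent as zero instead of a freshly generated one.
 */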
static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_reply_data *data = chap->buf;
	size_t size = sizeof(*data);

	size += 2 * chap->hash_len;

	if (chap->host_key_len)
		size += chap->host_key_len;

	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}

	memset(chap->buf, 0, size);
	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
	data->t_id = cpu_to_le16(chap->transaction);
	data->hl = chap->hash_len;
	data->dhvlen = cpu_to_le16(chap->host_key_len);
	memcpy(data->rval, chap->response, chap->hash_len);
	if (ctrl->ctrl_key)
		chap->bi_directional = true;
	if (ctrl->ctrl_key || ctrl->opts->concat) {
		get_random_bytes(chap->c2, chap->hash_len);
		data->cvalid = 1;
		memcpy(data->rval + chap->hash_len, chap->c2,
		       chap->hash_len);
		dev_dbg(ctrl->device, "%s: qid %d ctrl challenge %*ph\n",
			__func__, chap->qid, (int)chap->hash_len, chap->c2);
	} else {
		memset(chap->c2, 0, chap->hash_len);
	}
	if (ctrl->opts->concat)
		chap->s2 = 0;
	else
		chap->s2 = nvme_auth_get_seqnum();
	data->seqnum = cpu_to_le32(chap->s2);
	if (chap->host_key_len) {
		dev_dbg(ctrl->device, "%s: qid %d host public key %*ph\n",
			__func__, chap->qid,
			chap->host_key_len, chap->host_key);
		memcpy(data->rval + 2 * chap->hash_len, chap->host_key,
		       chap->host_key_len);
	}

	return size;
}

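/*
 * DH-HMAC-CHAP Step 4: process success1. If the controller included a
 * response (rvalid set), compare it against the expected value
 * precomputed by nvme_auth_dhchap_setup_ctrl_response(); a mismatch
 * means the controller failed to authenticate itself to the host.
 */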
static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_success1_data *data = chap->buf;
	size_t size = sizeof(*data) + chap->hash_len;

	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}

	if (data->hl != chap->hash_len) {
		dev_warn(ctrl->device,
			 "qid %d: invalid hash length %u\n",
			 chap->qid, data->hl);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		return -EPROTO;
	}

	/* Just print out information for the admin queue */
	if (chap->qid == 0)
		dev_info(ctrl->device,
			 "qid 0: authenticated with hash %s dhgroup %s\n",
			 nvme_auth_hmac_name(chap->hash_id),
			 nvme_auth_dhgroup_name(chap->dhgroup_id));

	if (!data->rvalid)
		return 0;

	/* Validate controller response */
	if (memcmp(chap->response, data->rval, data->hl)) {
		dev_dbg(ctrl->device, "%s: qid %d ctrl response %*ph\n",
			__func__, chap->qid, (int)chap->hash_len, data->rval);
		dev_dbg(ctrl->device, "%s: qid %d host response %*ph\n",
			__func__, chap->qid, (int)chap->hash_len,
			chap->response);
		dev_warn(ctrl->device,
			 "qid %d: controller authentication failed\n",
			 chap->qid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return -ECONNREFUSED;
	}

	/* Just print out information for the admin queue */
	if (chap->qid == 0)
		dev_info(ctrl->device,
			 "qid 0: controller authenticated\n");
	return 0;
}

static int nvme_auth_set_dhchap_success2_data(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_success2_data *data = chap->buf;
	size_t size = sizeof(*data);

	memset(chap->buf, 0, size);
	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
	data->t_id = cpu_to_le16(chap->transaction);

	return size;
}

static int nvme_auth_set_dhchap_failure2_data(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_failure_data *data = chap->buf;
	size_t size = sizeof(*data);

	memset(chap->buf, 0, size);
	data->auth_type = NVME_AUTH_COMMON_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
	data->t_id = cpu_to_le16(chap->transaction);
	data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
	data->rescode_exp = chap->status;

	return size;
}

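/*
 * Compute the host response:
 *
 *	R1 = HMAC(KH', C1 || s1 || TID || 0x00 || "HostHost" ||
 *		  NQNh || 0x00 || NQNc)
 *
 * where KH' is the transformed host key and C1 the (possibly
 * augmented) challenge. s1 is encoded as 32-bit LE and TID as 16-bit
 * LE, matching the crypto_shash_update() sequence below. The
 * transformed key is cached so retries skip the key transformation.
 */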
static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
	u8 buf[4], *challenge = chap->c1;
	int ret;

	dev_dbg(ctrl->device, "%s: qid %d host response seq %u transaction %d\n",
		__func__, chap->qid, chap->s1, chap->transaction);

	if (!chap->transformed_key) {
		chap->transformed_key = nvme_auth_transform_key(ctrl->host_key,
						ctrl->opts->host->nqn);
		if (IS_ERR(chap->transformed_key)) {
			ret = PTR_ERR(chap->transformed_key);
			chap->transformed_key = NULL;
			return ret;
		}
	} else {
		dev_dbg(ctrl->device, "%s: qid %d re-using host response\n",
			__func__, chap->qid);
	}

	ret = crypto_shash_setkey(chap->shash_tfm,
			chap->transformed_key->key, chap->transformed_key->len);
	if (ret) {
		dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
			 chap->qid, ret);
		goto out;
	}

	if (chap->dh_tfm) {
		challenge = kmalloc(chap->hash_len, GFP_KERNEL);
		if (!challenge) {
			ret = -ENOMEM;
			goto out;
		}
		ret = nvme_auth_augmented_challenge(chap->hash_id,
						    chap->sess_key,
						    chap->sess_key_len,
						    chap->c1, challenge,
						    chap->hash_len);
		if (ret)
			goto out;
	}

	shash->tfm = chap->shash_tfm;
	ret = crypto_shash_init(shash);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, challenge, chap->hash_len);
	if (ret)
		goto out;
	put_unaligned_le32(chap->s1, buf);
	ret = crypto_shash_update(shash, buf, 4);
	if (ret)
		goto out;
	put_unaligned_le16(chap->transaction, buf);
	ret = crypto_shash_update(shash, buf, 2);
	if (ret)
		goto out;
	memset(buf, 0, sizeof(buf));
	ret = crypto_shash_update(shash, buf, 1);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, "HostHost", 8);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
				  strlen(ctrl->opts->host->nqn));
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, buf, 1);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
			    strlen(ctrl->opts->subsysnqn));
	if (ret)
		goto out;
	ret = crypto_shash_final(shash, chap->response);
out:
	if (challenge != chap->c1)
		kfree(challenge);
	return ret;
}

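/*
 * Compute the expected controller response R2 the same way, but with
 * the transformed controller key, challenge c2, sequence number s2,
 * the label "Controller", and the NQN order reversed (subsystem NQN
 * before host NQN).
 */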
static int nvme_auth_dhchap_setup_ctrl_response(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
	struct nvme_dhchap_key *transformed_key;
	u8 buf[4], *challenge = chap->c2;
	int ret;

	transformed_key = nvme_auth_transform_key(ctrl->ctrl_key,
				ctrl->opts->subsysnqn);
	if (IS_ERR(transformed_key)) {
		ret = PTR_ERR(transformed_key);
		return ret;
	}

	ret = crypto_shash_setkey(chap->shash_tfm,
			transformed_key->key, transformed_key->len);
	if (ret) {
		dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
			 chap->qid, ret);
		goto out;
	}

	if (chap->dh_tfm) {
		challenge = kmalloc(chap->hash_len, GFP_KERNEL);
		if (!challenge) {
			ret = -ENOMEM;
			goto out;
		}
		ret = nvme_auth_augmented_challenge(chap->hash_id,
						    chap->sess_key,
						    chap->sess_key_len,
						    chap->c2, challenge,
						    chap->hash_len);
		if (ret)
			goto out;
	}
	dev_dbg(ctrl->device, "%s: qid %d ctrl response seq %u transaction %d\n",
		__func__, chap->qid, chap->s2, chap->transaction);
	dev_dbg(ctrl->device, "%s: qid %d challenge %*ph\n",
		__func__, chap->qid, (int)chap->hash_len, challenge);
	dev_dbg(ctrl->device, "%s: qid %d subsysnqn %s\n",
		__func__, chap->qid, ctrl->opts->subsysnqn);
	dev_dbg(ctrl->device, "%s: qid %d hostnqn %s\n",
		__func__, chap->qid, ctrl->opts->host->nqn);
	shash->tfm = chap->shash_tfm;
	ret = crypto_shash_init(shash);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, challenge, chap->hash_len);
	if (ret)
		goto out;
	put_unaligned_le32(chap->s2, buf);
	ret = crypto_shash_update(shash, buf, 4);
	if (ret)
		goto out;
	put_unaligned_le16(chap->transaction, buf);
	ret = crypto_shash_update(shash, buf, 2);
	if (ret)
		goto out;
	memset(buf, 0, 4);
	ret = crypto_shash_update(shash, buf, 1);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, "Controller", 10);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
				  strlen(ctrl->opts->subsysnqn));
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, buf, 1);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
				  strlen(ctrl->opts->host->nqn));
	if (ret)
		goto out;
	ret = crypto_shash_final(shash, chap->response);
out:
	if (challenge != chap->c2)
		kfree(challenge);
	nvme_auth_free_key(transformed_key);
	return ret;
}

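/*
 * FFDHE key exchange: generate an ephemeral private key for the
 * negotiated group (unless a host key is being reused), derive the
 * public value sent to the controller, and combine it with the
 * controller's public value into the shared session key used for
 * challenge augmentation and, with secure channel concatenation,
 * TLS PSK derivation.
 */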
static int nvme_auth_dhchap_exponential(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	int ret;

	if (chap->host_key && chap->host_key_len) {
		dev_dbg(ctrl->device,
			"qid %d: reusing host key\n", chap->qid);
		goto gen_sesskey;
	}
	ret = nvme_auth_gen_privkey(chap->dh_tfm, chap->dhgroup_id);
	if (ret < 0) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return ret;
	}

	chap->host_key_len = crypto_kpp_maxsize(chap->dh_tfm);

	chap->host_key = kzalloc(chap->host_key_len, GFP_KERNEL);
	if (!chap->host_key) {
		chap->host_key_len = 0;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return -ENOMEM;
	}
	ret = nvme_auth_gen_pubkey(chap->dh_tfm,
				   chap->host_key, chap->host_key_len);
	if (ret) {
		dev_dbg(ctrl->device,
			"failed to generate public key, error %d\n", ret);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return ret;
	}

gen_sesskey:
	chap->sess_key_len = chap->host_key_len;
	chap->sess_key = kmalloc(chap->sess_key_len, GFP_KERNEL);
	if (!chap->sess_key) {
		chap->sess_key_len = 0;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return -ENOMEM;
	}

	ret = nvme_auth_gen_shared_secret(chap->dh_tfm,
					  chap->ctrl_key, chap->ctrl_key_len,
					  chap->sess_key, chap->sess_key_len);
	if (ret) {
		dev_dbg(ctrl->device,
			"failed to generate shared secret, error %d\n", ret);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return ret;
	}
	dev_dbg(ctrl->device, "shared secret %*ph\n",
		(int)chap->sess_key_len, chap->sess_key);
	return 0;
}

static void nvme_auth_reset_dhchap(struct nvme_dhchap_queue_context *chap)
{
	nvme_auth_free_key(chap->transformed_key);
	chap->transformed_key = NULL;
	kfree_sensitive(chap->host_key);
	chap->host_key = NULL;
	chap->host_key_len = 0;
	kfree_sensitive(chap->ctrl_key);
	chap->ctrl_key = NULL;
	chap->ctrl_key_len = 0;
	kfree_sensitive(chap->sess_key);
	chap->sess_key = NULL;
	chap->sess_key_len = 0;
	chap->status = 0;
	chap->error = 0;
	chap->s1 = 0;
	chap->s2 = 0;
	chap->bi_directional = false;
	chap->transaction = 0;
	memset(chap->c1, 0, sizeof(chap->c1));
	memset(chap->c2, 0, sizeof(chap->c2));
	mempool_free(chap->buf, nvme_chap_buf_pool);
	chap->buf = NULL;
}

static void nvme_auth_free_dhchap(struct nvme_dhchap_queue_context *chap)
{
	nvme_auth_reset_dhchap(chap);
	if (chap->shash_tfm)
		crypto_free_shash(chap->shash_tfm);
	if (chap->dh_tfm)
		crypto_free_kpp(chap->dh_tfm);
}

void nvme_auth_revoke_tls_key(struct nvme_ctrl *ctrl)
{
	dev_dbg(ctrl->device, "Wipe generated TLS PSK %08x\n",
		key_serial(ctrl->opts->tls_key));
	key_revoke(ctrl->opts->tls_key);
	key_put(ctrl->opts->tls_key);
	ctrl->opts->tls_key = NULL;
}
EXPORT_SYMBOL_GPL(nvme_auth_revoke_tls_key);

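/*
 * Secure channel concatenation (admin queue only): derive a PSK from
 * the DH session key and both challenges, bind it to the host and
 * subsystem NQNs via a digest, derive the final TLS PSK from the two,
 * and insert it into the keyring so the transport can reconnect with
 * TLS using the freshly generated key.
 */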
static int nvme_auth_secure_concat(struct nvme_ctrl *ctrl,
				   struct nvme_dhchap_queue_context *chap)
{
	u8 *psk, *digest, *tls_psk;
	struct key *tls_key;
	size_t psk_len;
	int ret = 0;

	if (!chap->sess_key) {
		dev_warn(ctrl->device,
			 "%s: qid %d no session key negotiated\n",
			 __func__, chap->qid);
		return -ENOKEY;
	}

	if (chap->qid) {
		dev_warn(ctrl->device,
			 "qid %d: secure concatenation not supported on I/O queues\n",
			 chap->qid);
		return -EINVAL;
	}
	ret = nvme_auth_generate_psk(chap->hash_id, chap->sess_key,
				     chap->sess_key_len,
				     chap->c1, chap->c2,
				     chap->hash_len, &psk, &psk_len);
	if (ret) {
		dev_warn(ctrl->device,
			 "%s: qid %d failed to generate PSK, error %d\n",
			 __func__, chap->qid, ret);
		return ret;
	}
	dev_dbg(ctrl->device,
		  "%s: generated psk %*ph\n", __func__, (int)psk_len, psk);

	ret = nvme_auth_generate_digest(chap->hash_id, psk, psk_len,
					ctrl->opts->subsysnqn,
					ctrl->opts->host->nqn, &digest);
	if (ret) {
		dev_warn(ctrl->device,
			 "%s: qid %d failed to generate digest, error %d\n",
			 __func__, chap->qid, ret);
		goto out_free_psk;
	}
	dev_dbg(ctrl->device, "%s: generated digest %s\n",
		 __func__, digest);
	ret = nvme_auth_derive_tls_psk(chap->hash_id, psk, psk_len,
				       digest, &tls_psk);
	if (ret) {
		dev_warn(ctrl->device,
			 "%s: qid %d failed to derive TLS psk, error %d\n",
			 __func__, chap->qid, ret);
		goto out_free_digest;
	}

	tls_key = nvme_tls_psk_refresh(ctrl->opts->keyring,
				       ctrl->opts->host->nqn,
				       ctrl->opts->subsysnqn, chap->hash_id,
				       tls_psk, psk_len, digest);
	if (IS_ERR(tls_key)) {
		ret = PTR_ERR(tls_key);
		dev_warn(ctrl->device,
			 "%s: qid %d failed to insert generated key, error %d\n",
			 __func__, chap->qid, ret);
		tls_key = NULL;
	}
	kfree_sensitive(tls_psk);
	if (ctrl->opts->tls_key)
		nvme_auth_revoke_tls_key(ctrl);
	ctrl->opts->tls_key = tls_key;
out_free_digest:
	kfree_sensitive(digest);
out_free_psk:
	kfree_sensitive(psk);
	return ret;
}

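/*
 * Per-queue authentication state machine, run from nvme_auth_wq. It
 * walks the DH-HMAC-CHAP steps in order: send negotiate, receive
 * challenge, send reply, receive success1, and, for bi-directional
 * authentication, send success2. Failures detected after a challenge
 * was received are answered with a failure2 message carrying the
 * reason code from chap->status; chap->error holds the result that
 * nvme_auth_wait() later returns.
 */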
static void nvme_queue_auth_work(struct work_struct *work)
{
	struct nvme_dhchap_queue_context *chap =
		container_of(work, struct nvme_dhchap_queue_context, auth_work);
	struct nvme_ctrl *ctrl = chap->ctrl;
	size_t tl;
	int ret = 0;

	/*
	 * Allocate a buffer large enough for the entire negotiation:
	 * 4k is enough even for ffdhe8192.
	 */
	chap->buf = mempool_alloc(nvme_chap_buf_pool, GFP_KERNEL);
	if (!chap->buf) {
		chap->error = -ENOMEM;
		return;
	}

	chap->transaction = ctrl->transaction++;

	/* DH-HMAC-CHAP Step 1: send negotiate */
	dev_dbg(ctrl->device, "%s: qid %d send negotiate\n",
		__func__, chap->qid);
	ret = nvme_auth_set_dhchap_negotiate_data(ctrl, chap);
	if (ret < 0) {
		chap->error = ret;
		return;
	}
	tl = ret;
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
	if (ret) {
		chap->error = ret;
		return;
	}

	/* DH-HMAC-CHAP Step 2: receive challenge */
	dev_dbg(ctrl->device, "%s: qid %d receive challenge\n",
		__func__, chap->qid);

	memset(chap->buf, 0, CHAP_BUF_SIZE);
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, CHAP_BUF_SIZE,
			       false);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid %d failed to receive challenge, %s %d\n",
			 chap->qid, ret < 0 ? "error" : "nvme status", ret);
		chap->error = ret;
		return;
	}
	ret = nvme_auth_receive_validate(ctrl, chap->qid, chap->buf, chap->transaction,
					 NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE);
	if (ret) {
		chap->status = ret;
		chap->error = -EKEYREJECTED;
		return;
	}

	ret = nvme_auth_process_dhchap_challenge(ctrl, chap);
	if (ret) {
		/* Invalid challenge parameters */
		chap->error = ret;
		goto fail2;
	}

	if (chap->ctrl_key_len) {
		dev_dbg(ctrl->device,
			"%s: qid %d DH exponential\n",
			__func__, chap->qid);
		ret = nvme_auth_dhchap_exponential(ctrl, chap);
		if (ret) {
			chap->error = ret;
			goto fail2;
		}
	}

	dev_dbg(ctrl->device, "%s: qid %d host response\n",
		__func__, chap->qid);
	mutex_lock(&ctrl->dhchap_auth_mutex);
	ret = nvme_auth_dhchap_setup_host_response(ctrl, chap);
	mutex_unlock(&ctrl->dhchap_auth_mutex);
	if (ret) {
		chap->error = ret;
		goto fail2;
	}

	/* DH-HMAC-CHAP Step 3: send reply */
	dev_dbg(ctrl->device, "%s: qid %d send reply\n",
		__func__, chap->qid);
	ret = nvme_auth_set_dhchap_reply_data(ctrl, chap);
	if (ret < 0) {
		chap->error = ret;
		goto fail2;
	}

	tl = ret;
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
	if (ret) {
		chap->error = ret;
		goto fail2;
	}

	/* DH-HMAC-CHAP Step 4: receive success1 */
	dev_dbg(ctrl->device, "%s: qid %d receive success1\n",
		__func__, chap->qid);

	memset(chap->buf, 0, CHAP_BUF_SIZE);
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, CHAP_BUF_SIZE,
			       false);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid %d failed to receive success1, %s %d\n",
			 chap->qid, ret < 0 ? "error" : "nvme status", ret);
		chap->error = ret;
		return;
	}
	ret = nvme_auth_receive_validate(ctrl, chap->qid,
					 chap->buf, chap->transaction,
					 NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1);
	if (ret) {
		chap->status = ret;
		chap->error = -EKEYREJECTED;
		return;
	}

	mutex_lock(&ctrl->dhchap_auth_mutex);
	if (ctrl->ctrl_key) {
		dev_dbg(ctrl->device,
			"%s: qid %d controller response\n",
			__func__, chap->qid);
		ret = nvme_auth_dhchap_setup_ctrl_response(ctrl, chap);
		if (ret) {
			mutex_unlock(&ctrl->dhchap_auth_mutex);
			chap->error = ret;
			goto fail2;
		}
	}
	mutex_unlock(&ctrl->dhchap_auth_mutex);

	ret = nvme_auth_process_dhchap_success1(ctrl, chap);
	if (ret) {
		/* Controller authentication failed */
		chap->error = -EKEYREJECTED;
		goto fail2;
	}

	if (chap->bi_directional) {
		/* DH-HMAC-CHAP Step 5: send success2 */
		dev_dbg(ctrl->device, "%s: qid %d send success2\n",
			__func__, chap->qid);
		tl = nvme_auth_set_dhchap_success2_data(ctrl, chap);
		ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
		if (ret)
			chap->error = ret;
	}
	if (!ret) {
		chap->error = 0;
		if (ctrl->opts->concat &&
		    (ret = nvme_auth_secure_concat(ctrl, chap))) {
			dev_warn(ctrl->device,
				 "%s: qid %d failed to enable secure concatenation\n",
				 __func__, chap->qid);
			chap->error = ret;
		}
		return;
	}

fail2:
	if (chap->status == 0)
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
	dev_dbg(ctrl->device, "%s: qid %d send failure2, status %x\n",
		__func__, chap->qid, chap->status);
	tl = nvme_auth_set_dhchap_failure2_data(ctrl, chap);
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
	/*
	 * Only update the error if sending failure2 failed and no other
	 * error had been set during authentication.
	 */
	if (ret && !chap->error)
		chap->error = ret;
}

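/*
 * nvme_auth_negotiate() kicks off authentication for one queue and
 * returns immediately; nvme_auth_wait() flushes the work item and
 * collects the result. Callers pair the two, as nvme_ctrl_auth_work()
 * does for the admin queue:
 *
 *	ret = nvme_auth_negotiate(ctrl, 0);
 *	if (!ret)
 *		ret = nvme_auth_wait(ctrl, 0);
 */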
int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
{
	struct nvme_dhchap_queue_context *chap;

	if (!ctrl->host_key) {
		dev_warn(ctrl->device, "qid %d: no key\n", qid);
		return -ENOKEY;
	}

	if (ctrl->opts->dhchap_ctrl_secret && !ctrl->ctrl_key) {
		dev_warn(ctrl->device, "qid %d: invalid ctrl key\n", qid);
		return -ENOKEY;
	}

	chap = &ctrl->dhchap_ctxs[qid];
	cancel_work_sync(&chap->auth_work);
	queue_work(nvme_auth_wq, &chap->auth_work);
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_auth_negotiate);

int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
{
	struct nvme_dhchap_queue_context *chap;
	int ret;

	chap = &ctrl->dhchap_ctxs[qid];
	flush_work(&chap->auth_work);
	ret = chap->error;
	/* clear sensitive info */
	nvme_auth_reset_dhchap(chap);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_auth_wait);

static void nvme_ctrl_auth_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, dhchap_auth_work);
	int ret, q;

	/*
	 * If the ctrl is not connected, bail out as reconnect will handle
	 * authentication.
	 */
	if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
		return;

	/* Authenticate admin queue first */
	ret = nvme_auth_negotiate(ctrl, 0);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid 0: error %d setting up authentication\n", ret);
		return;
	}
	ret = nvme_auth_wait(ctrl, 0);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid 0: authentication failed\n");
		return;
	}
	/*
	 * Only run authentication on the admin queue for secure concatenation.
	 */
	if (ctrl->opts->concat)
		return;

	for (q = 1; q < ctrl->queue_count; q++) {
		ret = nvme_auth_negotiate(ctrl, q);
		if (ret) {
			dev_warn(ctrl->device,
				 "qid %d: error %d setting up authentication\n",
				 q, ret);
			break;
		}
	}

	/*
	 * Failure is a soft-state; credentials remain valid until
	 * the controller terminates the connection.
	 */
	for (q = 1; q < ctrl->queue_count; q++) {
		ret = nvme_auth_wait(ctrl, q);
		if (ret)
			dev_warn(ctrl->device,
				 "qid %d: authentication failed\n", q);
	}
}

int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
{
	struct nvme_dhchap_queue_context *chap;
	int i, ret;

	mutex_init(&ctrl->dhchap_auth_mutex);
	INIT_WORK(&ctrl->dhchap_auth_work, nvme_ctrl_auth_work);
	if (!ctrl->opts)
		return 0;
	ret = nvme_auth_generate_key(ctrl->opts->dhchap_secret,
			&ctrl->host_key);
	if (ret)
		return ret;
	ret = nvme_auth_generate_key(ctrl->opts->dhchap_ctrl_secret,
			&ctrl->ctrl_key);
	if (ret)
		goto err_free_dhchap_secret;

	if (!ctrl->opts->dhchap_secret && !ctrl->opts->dhchap_ctrl_secret)
		return 0;

	ctrl->dhchap_ctxs = kvcalloc(ctrl_max_dhchaps(ctrl),
				sizeof(*chap), GFP_KERNEL);
	if (!ctrl->dhchap_ctxs) {
		ret = -ENOMEM;
		goto err_free_dhchap_ctrl_secret;
	}

	for (i = 0; i < ctrl_max_dhchaps(ctrl); i++) {
		chap = &ctrl->dhchap_ctxs[i];
		chap->qid = i;
		chap->ctrl = ctrl;
		INIT_WORK(&chap->auth_work, nvme_queue_auth_work);
	}

	return 0;
err_free_dhchap_ctrl_secret:
	nvme_auth_free_key(ctrl->ctrl_key);
	ctrl->ctrl_key = NULL;
err_free_dhchap_secret:
	nvme_auth_free_key(ctrl->host_key);
	ctrl->host_key = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_auth_init_ctrl);

void nvme_auth_stop(struct nvme_ctrl *ctrl)
{
	cancel_work_sync(&ctrl->dhchap_auth_work);
}
EXPORT_SYMBOL_GPL(nvme_auth_stop);

void nvme_auth_free(struct nvme_ctrl *ctrl)
{
	int i;

	if (ctrl->dhchap_ctxs) {
		for (i = 0; i < ctrl_max_dhchaps(ctrl); i++)
			nvme_auth_free_dhchap(&ctrl->dhchap_ctxs[i]);
		/* allocated with kvcalloc(), so pair with kvfree() */
		kvfree(ctrl->dhchap_ctxs);
	}
	if (ctrl->host_key) {
		nvme_auth_free_key(ctrl->host_key);
		ctrl->host_key = NULL;
	}
	if (ctrl->ctrl_key) {
		nvme_auth_free_key(ctrl->ctrl_key);
		ctrl->ctrl_key = NULL;
	}
}
EXPORT_SYMBOL_GPL(nvme_auth_free);

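/*
 * Module init: authentication runs on a dedicated unbound workqueue,
 * and the 4k negotiation buffers come from a 16-element mempool so
 * that authentication can make forward progress during reconnects
 * under memory pressure.
 */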
int __init nvme_init_auth(void)
{
	nvme_auth_wq = alloc_workqueue("nvme-auth-wq",
			       WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_auth_wq)
		return -ENOMEM;

	nvme_chap_buf_cache = kmem_cache_create("nvme-chap-buf-cache",
				CHAP_BUF_SIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
	if (!nvme_chap_buf_cache)
		goto err_destroy_workqueue;

	nvme_chap_buf_pool = mempool_create(16, mempool_alloc_slab,
			mempool_free_slab, nvme_chap_buf_cache);
	if (!nvme_chap_buf_pool)
		goto err_destroy_chap_buf_cache;

	return 0;
err_destroy_chap_buf_cache:
	kmem_cache_destroy(nvme_chap_buf_cache);
err_destroy_workqueue:
	destroy_workqueue(nvme_auth_wq);
	return -ENOMEM;
}

void __exit nvme_exit_auth(void)
{
	mempool_destroy(nvme_chap_buf_pool);
	kmem_cache_destroy(nvme_chap_buf_cache);
	destroy_workqueue(nvme_auth_wq);
}