xref: /linux/drivers/nvme/host/auth.c (revision e3966940559d52aa1800a008dcfeec218dd31f88)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2020 Hannes Reinecke, SUSE Linux
4  */
5 
6 #include <linux/crc32.h>
7 #include <linux/base64.h>
8 #include <linux/prandom.h>
9 #include <linux/unaligned.h>
10 #include <crypto/hash.h>
11 #include <crypto/dh.h>
12 #include "nvme.h"
13 #include "fabrics.h"
14 #include <linux/nvme-auth.h>
15 #include <linux/nvme-keyring.h>
16 
/* Size of the buffer used for a single DH-HMAC-CHAP message exchange */
#define CHAP_BUF_SIZE 4096
/* Slab cache and mempool backing the per-negotiation message buffers */
static struct kmem_cache *nvme_chap_buf_cache;
static mempool_t *nvme_chap_buf_pool;
20 
/*
 * Per-queue DH-HMAC-CHAP negotiation state.  One context exists for
 * each queue that may be authenticated; the crypto transforms are kept
 * across negotiations and reused when the controller selects the same
 * hash / DH group again.
 */
struct nvme_dhchap_queue_context {
	struct list_head entry;		/* linkage for context tracking (managed by core) */
	struct work_struct auth_work;	/* runs nvme_queue_auth_work() for this queue */
	struct nvme_ctrl *ctrl;		/* owning controller */
	struct crypto_shash *shash_tfm;	/* negotiated HMAC transform, reusable */
	struct crypto_kpp *dh_tfm;	/* negotiated DH transform; NULL for DH group NULL */
	struct nvme_dhchap_key *transformed_key; /* host key after nvme_auth_transform_key() */
	void *buf;			/* CHAP_BUF_SIZE message buffer from nvme_chap_buf_pool */
	int qid;			/* queue being authenticated (0 == admin) */
	int error;			/* final outcome of the negotiation */
	u32 s1;				/* sequence number from the controller's challenge */
	u32 s2;				/* sequence number generated for the host's reply */
	bool bi_directional;		/* controller authentication was requested */
	bool authenticated;		/* last negotiation on this queue succeeded */
	u16 transaction;		/* transaction id echoed in every message */
	u8 status;			/* DH-HMAC-CHAP failure reason code, 0 if none */
	u8 dhgroup_id;			/* selected DH group identifier */
	u8 hash_id;			/* selected hash identifier */
	u8 sc_c;			/* SC_C value sent in the negotiate message */
	size_t hash_len;		/* digest size of the selected hash */
	u8 c1[64];			/* controller challenge (64 == max SHA-512 digest) */
	u8 c2[64];			/* host challenge for bi-directional auth */
	u8 response[64];		/* computed HMAC response */
	u8 *ctrl_key;			/* controller's DH public value */
	u8 *host_key;			/* host's DH public value */
	u8 *sess_key;			/* derived DH shared secret */
	int ctrl_key_len;
	int host_key_len;
	int sess_key_len;
};
51 
52 static struct workqueue_struct *nvme_auth_wq;
53 
54 static inline int ctrl_max_dhchaps(struct nvme_ctrl *ctrl)
55 {
56 	return ctrl->opts->nr_io_queues + ctrl->opts->nr_write_queues +
57 			ctrl->opts->nr_poll_queues + 1;
58 }
59 
60 static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
61 			    void *data, size_t data_len, bool auth_send)
62 {
63 	struct nvme_command cmd = {};
64 	nvme_submit_flags_t flags = NVME_SUBMIT_RETRY;
65 	struct request_queue *q = ctrl->fabrics_q;
66 	int ret;
67 
68 	if (qid != 0) {
69 		flags |= NVME_SUBMIT_NOWAIT | NVME_SUBMIT_RESERVED;
70 		q = ctrl->connect_q;
71 	}
72 
73 	cmd.auth_common.opcode = nvme_fabrics_command;
74 	cmd.auth_common.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER;
75 	cmd.auth_common.spsp0 = 0x01;
76 	cmd.auth_common.spsp1 = 0x01;
77 	if (auth_send) {
78 		cmd.auth_send.fctype = nvme_fabrics_type_auth_send;
79 		cmd.auth_send.tl = cpu_to_le32(data_len);
80 	} else {
81 		cmd.auth_receive.fctype = nvme_fabrics_type_auth_receive;
82 		cmd.auth_receive.al = cpu_to_le32(data_len);
83 	}
84 
85 	ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, data_len,
86 				     qid == 0 ? NVME_QID_ANY : qid, flags);
87 	if (ret > 0)
88 		dev_warn(ctrl->device,
89 			"qid %d auth_send failed with status %d\n", qid, ret);
90 	else if (ret < 0)
91 		dev_err(ctrl->device,
92 			"qid %d auth_send failed with error %d\n", qid, ret);
93 	return ret;
94 }
95 
96 static int nvme_auth_receive_validate(struct nvme_ctrl *ctrl, int qid,
97 		struct nvmf_auth_dhchap_failure_data *data,
98 		u16 transaction, u8 expected_msg)
99 {
100 	dev_dbg(ctrl->device, "%s: qid %d auth_type %d auth_id %x\n",
101 		__func__, qid, data->auth_type, data->auth_id);
102 
103 	if (data->auth_type == NVME_AUTH_COMMON_MESSAGES &&
104 	    data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
105 		return data->rescode_exp;
106 	}
107 	if (data->auth_type != NVME_AUTH_DHCHAP_MESSAGES ||
108 	    data->auth_id != expected_msg) {
109 		dev_warn(ctrl->device,
110 			 "qid %d invalid message %02x/%02x\n",
111 			 qid, data->auth_type, data->auth_id);
112 		return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
113 	}
114 	if (le16_to_cpu(data->t_id) != transaction) {
115 		dev_warn(ctrl->device,
116 			 "qid %d invalid transaction ID %d\n",
117 			 qid, le16_to_cpu(data->t_id));
118 		return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
119 	}
120 	return 0;
121 }
122 
/*
 * Build the AUTH_Negotiate message in chap->buf, advertising the hashes
 * and DH groups the host supports.  Returns the message length, or a
 * negative errno with chap->status set on failure.
 */
static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_negotiate_data *data = chap->buf;
	size_t size = sizeof(*data) + sizeof(union nvmf_auth_protocol);

	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}
	memset((u8 *)chap->buf, 0, size);
	data->auth_type = NVME_AUTH_COMMON_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
	data->t_id = cpu_to_le16(chap->transaction);
	/*
	 * Secure channel concatenation is only requested on the admin
	 * queue; request a new TLS PSK, or a replacement if one is
	 * already in place.
	 */
	if (ctrl->opts->concat && chap->qid == 0) {
		if (ctrl->opts->tls_key)
			data->sc_c = NVME_AUTH_SECP_REPLACETLSPSK;
		else
			data->sc_c = NVME_AUTH_SECP_NEWTLSPSK;
	} else
		data->sc_c = NVME_AUTH_SECP_NOSC;
	data->napd = 1;
	data->auth_protocol[0].dhchap.authid = NVME_AUTH_DHCHAP_AUTH_ID;
	data->auth_protocol[0].dhchap.halen = 3;
	data->auth_protocol[0].dhchap.dhlen = 6;
	/* idlist[0..2]: supported hash identifiers (halen entries) */
	data->auth_protocol[0].dhchap.idlist[0] = NVME_AUTH_HASH_SHA256;
	data->auth_protocol[0].dhchap.idlist[1] = NVME_AUTH_HASH_SHA384;
	data->auth_protocol[0].dhchap.idlist[2] = NVME_AUTH_HASH_SHA512;
	/* idlist[30..35]: DH group identifiers start at fixed offset 30 */
	data->auth_protocol[0].dhchap.idlist[30] = NVME_AUTH_DHGROUP_NULL;
	data->auth_protocol[0].dhchap.idlist[31] = NVME_AUTH_DHGROUP_2048;
	data->auth_protocol[0].dhchap.idlist[32] = NVME_AUTH_DHGROUP_3072;
	data->auth_protocol[0].dhchap.idlist[33] = NVME_AUTH_DHGROUP_4096;
	data->auth_protocol[0].dhchap.idlist[34] = NVME_AUTH_DHGROUP_6144;
	data->auth_protocol[0].dhchap.idlist[35] = NVME_AUTH_DHGROUP_8192;

	/* Remember SC_C: it is folded into the host response hash later */
	chap->sc_c = data->sc_c;

	return size;
}
162 
/*
 * Parse the AUTH_Challenge message: select (or reuse) the HMAC and DH
 * transforms the controller picked, and stash the controller challenge
 * plus its DH public value for computing the reply.
 *
 * Returns 0 on success or a negative errno; on failure chap->status
 * carries the DH-HMAC-CHAP reason code to report back.
 */
static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_challenge_data *data = chap->buf;
	u16 dhvlen = le16_to_cpu(data->dhvlen);
	size_t size = sizeof(*data) + data->hl + dhvlen;
	const char *gid_name = nvme_auth_dhgroup_name(data->dhgid);
	const char *hmac_name, *kpp_name;

	/* Reject controller-supplied lengths exceeding our buffer */
	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}

	hmac_name = nvme_auth_hmac_name(data->hashid);
	if (!hmac_name) {
		dev_warn(ctrl->device,
			 "qid %d: invalid HASH ID %d\n",
			 chap->qid, data->hashid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		return -EPROTO;
	}

	/* Reuse the existing transform if id, algorithm and size all match */
	if (chap->hash_id == data->hashid && chap->shash_tfm &&
	    !strcmp(crypto_shash_alg_name(chap->shash_tfm), hmac_name) &&
	    crypto_shash_digestsize(chap->shash_tfm) == data->hl) {
		dev_dbg(ctrl->device,
			"qid %d: reuse existing hash %s\n",
			chap->qid, hmac_name);
		goto select_kpp;
	}

	/* Reset if hash cannot be reused */
	if (chap->shash_tfm) {
		crypto_free_shash(chap->shash_tfm);
		chap->hash_id = 0;
		chap->hash_len = 0;
	}
	chap->shash_tfm = crypto_alloc_shash(hmac_name, 0,
					     CRYPTO_ALG_ALLOCATES_MEMORY);
	if (IS_ERR(chap->shash_tfm)) {
		dev_warn(ctrl->device,
			 "qid %d: failed to allocate hash %s, error %ld\n",
			 chap->qid, hmac_name, PTR_ERR(chap->shash_tfm));
		chap->shash_tfm = NULL;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return -ENOMEM;
	}

	/* The advertised hash length must match the digest size */
	if (crypto_shash_digestsize(chap->shash_tfm) != data->hl) {
		dev_warn(ctrl->device,
			 "qid %d: invalid hash length %d\n",
			 chap->qid, data->hl);
		crypto_free_shash(chap->shash_tfm);
		chap->shash_tfm = NULL;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		return -EPROTO;
	}

	chap->hash_id = data->hashid;
	chap->hash_len = data->hl;
	dev_dbg(ctrl->device, "qid %d: selected hash %s\n",
		chap->qid, hmac_name);

select_kpp:
	kpp_name = nvme_auth_dhgroup_kpp(data->dhgid);
	if (!kpp_name) {
		dev_warn(ctrl->device,
			 "qid %d: invalid DH group id %d\n",
			 chap->qid, data->dhgid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
		/* Leave previous dh_tfm intact */
		return -EPROTO;
	}

	/* DH group NULL needs no transform; otherwise reuse a matching one */
	if (chap->dhgroup_id == data->dhgid &&
	    (data->dhgid == NVME_AUTH_DHGROUP_NULL || chap->dh_tfm)) {
		dev_dbg(ctrl->device,
			"qid %d: reuse existing DH group %s\n",
			chap->qid, gid_name);
		goto skip_kpp;
	}

	/* Reset dh_tfm if it can't be reused */
	if (chap->dh_tfm) {
		crypto_free_kpp(chap->dh_tfm);
		chap->dh_tfm = NULL;
	}

	if (data->dhgid != NVME_AUTH_DHGROUP_NULL) {
		/* A real DH group requires a controller public value */
		if (dhvlen == 0) {
			dev_warn(ctrl->device,
				 "qid %d: empty DH value\n",
				 chap->qid);
			chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
			return -EPROTO;
		}

		chap->dh_tfm = crypto_alloc_kpp(kpp_name, 0, 0);
		if (IS_ERR(chap->dh_tfm)) {
			int ret = PTR_ERR(chap->dh_tfm);

			dev_warn(ctrl->device,
				 "qid %d: error %d initializing DH group %s\n",
				 chap->qid, ret, gid_name);
			chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
			chap->dh_tfm = NULL;
			return ret;
		}
		dev_dbg(ctrl->device, "qid %d: selected DH group %s\n",
			chap->qid, gid_name);
	} else if (dhvlen != 0) {
		/* Conversely, DH group NULL must not carry a DH value */
		dev_warn(ctrl->device,
			 "qid %d: invalid DH value for NULL DH\n",
			 chap->qid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EPROTO;
	}
	chap->dhgroup_id = data->dhgid;

skip_kpp:
	chap->s1 = le32_to_cpu(data->seqnum);
	/* cval layout: challenge value, then the DH value right after it */
	memcpy(chap->c1, data->cval, chap->hash_len);
	if (dhvlen) {
		chap->ctrl_key = kmalloc(dhvlen, GFP_KERNEL);
		if (!chap->ctrl_key) {
			chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
			return -ENOMEM;
		}
		chap->ctrl_key_len = dhvlen;
		memcpy(chap->ctrl_key, data->cval + chap->hash_len,
		       dhvlen);
		dev_dbg(ctrl->device, "ctrl public key %*ph\n",
			 (int)chap->ctrl_key_len, chap->ctrl_key);
	}

	return 0;
}
301 
/*
 * Build the AUTH_Reply message in chap->buf.  The rval area carries,
 * in order: the host response, an optional host challenge (when
 * controller authentication or concatenation is requested), and the
 * optional host DH public value.
 *
 * Returns the message length, or a negative errno with chap->status
 * set on failure.
 */
static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_reply_data *data = chap->buf;
	size_t size = sizeof(*data);

	/* Space for the host response plus an (optional) host challenge */
	size += 2 * chap->hash_len;

	if (chap->host_key_len)
		size += chap->host_key_len;

	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}

	memset(chap->buf, 0, size);
	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
	data->t_id = cpu_to_le16(chap->transaction);
	data->hl = chap->hash_len;
	data->dhvlen = cpu_to_le16(chap->host_key_len);
	memcpy(data->rval, chap->response, chap->hash_len);
	if (ctrl->ctrl_key)
		chap->bi_directional = true;
	if (ctrl->ctrl_key || ctrl->opts->concat) {
		/* Fresh random host challenge for the controller response */
		get_random_bytes(chap->c2, chap->hash_len);
		data->cvalid = 1;
		memcpy(data->rval + chap->hash_len, chap->c2,
		       chap->hash_len);
		dev_dbg(ctrl->device, "%s: qid %d ctrl challenge %*ph\n",
			__func__, chap->qid, (int)chap->hash_len, chap->c2);
	} else {
		memset(chap->c2, 0, chap->hash_len);
	}
	/* Secure concatenation uses sequence number 0 and no ctrl auth */
	if (ctrl->opts->concat) {
		chap->s2 = 0;
		chap->bi_directional = false;
	} else
		chap->s2 = nvme_auth_get_seqnum();
	data->seqnum = cpu_to_le32(chap->s2);
	if (chap->host_key_len) {
		dev_dbg(ctrl->device, "%s: qid %d host public key %*ph\n",
			__func__, chap->qid,
			chap->host_key_len, chap->host_key);
		memcpy(data->rval + 2 * chap->hash_len, chap->host_key,
		       chap->host_key_len);
	}

	return size;
}
353 
/*
 * Process the AUTH_Success1 message.  If the controller included a
 * response value (rvalid), compare it against the expected controller
 * response computed earlier into chap->response.
 *
 * Returns 0 on success, -EPROTO/-EINVAL on malformed input, or
 * -ECONNREFUSED when controller authentication fails.
 */
static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_success1_data *data = chap->buf;
	size_t size = sizeof(*data) + chap->hash_len;

	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}

	if (data->hl != chap->hash_len) {
		dev_warn(ctrl->device,
			 "qid %d: invalid hash length %u\n",
			 chap->qid, data->hl);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		return -EPROTO;
	}

	/* Just print out information for the admin queue */
	if (chap->qid == 0)
		dev_info(ctrl->device,
			 "qid 0: authenticated with hash %s dhgroup %s\n",
			 nvme_auth_hmac_name(chap->hash_id),
			 nvme_auth_dhgroup_name(chap->dhgroup_id));

	/* No controller response present: uni-directional auth is done */
	if (!data->rvalid)
		return 0;

	/* Validate controller response */
	if (memcmp(chap->response, data->rval, data->hl)) {
		dev_dbg(ctrl->device, "%s: qid %d ctrl response %*ph\n",
			__func__, chap->qid, (int)chap->hash_len, data->rval);
		dev_dbg(ctrl->device, "%s: qid %d host response %*ph\n",
			__func__, chap->qid, (int)chap->hash_len,
			chap->response);
		dev_warn(ctrl->device,
			 "qid %d: controller authentication failed\n",
			 chap->qid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return -ECONNREFUSED;
	}

	/* Just print out information for the admin queue */
	if (chap->qid == 0)
		dev_info(ctrl->device,
			 "qid 0: controller authenticated\n");
	return 0;
}
403 
404 static int nvme_auth_set_dhchap_success2_data(struct nvme_ctrl *ctrl,
405 		struct nvme_dhchap_queue_context *chap)
406 {
407 	struct nvmf_auth_dhchap_success2_data *data = chap->buf;
408 	size_t size = sizeof(*data);
409 
410 	memset(chap->buf, 0, size);
411 	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
412 	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
413 	data->t_id = cpu_to_le16(chap->transaction);
414 
415 	return size;
416 }
417 
418 static int nvme_auth_set_dhchap_failure2_data(struct nvme_ctrl *ctrl,
419 		struct nvme_dhchap_queue_context *chap)
420 {
421 	struct nvmf_auth_dhchap_failure_data *data = chap->buf;
422 	size_t size = sizeof(*data);
423 
424 	memset(chap->buf, 0, size);
425 	data->auth_type = NVME_AUTH_COMMON_MESSAGES;
426 	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
427 	data->t_id = cpu_to_le16(chap->transaction);
428 	data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
429 	data->rescode_exp = chap->status;
430 
431 	return size;
432 }
433 
/*
 * Compute the host response into chap->response.
 *
 * The HMAC (keyed with the transformed host key) covers, in this exact
 * order: challenge (augmented by the DH session key when a DH group is
 * in use), s1 (LE32), transaction id (LE16), SC_C, the literal
 * "HostHost", the host NQN, a NUL separator, and the subsystem NQN.
 * The update order is protocol-defined and must not be changed.
 *
 * Returns 0 on success or a negative errno.
 */
static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
	u8 buf[4], *challenge = chap->c1;
	int ret;

	dev_dbg(ctrl->device, "%s: qid %d host response seq %u transaction %d\n",
		__func__, chap->qid, chap->s1, chap->transaction);

	/* Transform the host key once and cache it for re-authentication */
	if (!chap->transformed_key) {
		chap->transformed_key = nvme_auth_transform_key(ctrl->host_key,
						ctrl->opts->host->nqn);
		if (IS_ERR(chap->transformed_key)) {
			ret = PTR_ERR(chap->transformed_key);
			chap->transformed_key = NULL;
			return ret;
		}
	} else {
		dev_dbg(ctrl->device, "%s: qid %d re-using host response\n",
			__func__, chap->qid);
	}

	ret = crypto_shash_setkey(chap->shash_tfm,
			chap->transformed_key->key, chap->transformed_key->len);
	if (ret) {
		dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
			 chap->qid, ret);
		goto out;
	}

	/* With a DH group, hash the augmented challenge instead of c1 */
	if (chap->dh_tfm) {
		challenge = kmalloc(chap->hash_len, GFP_KERNEL);
		if (!challenge) {
			ret = -ENOMEM;
			goto out;
		}
		ret = nvme_auth_augmented_challenge(chap->hash_id,
						    chap->sess_key,
						    chap->sess_key_len,
						    chap->c1, challenge,
						    chap->hash_len);
		if (ret)
			goto out;
	}

	shash->tfm = chap->shash_tfm;
	ret = crypto_shash_init(shash);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, challenge, chap->hash_len);
	if (ret)
		goto out;
	put_unaligned_le32(chap->s1, buf);
	ret = crypto_shash_update(shash, buf, 4);
	if (ret)
		goto out;
	put_unaligned_le16(chap->transaction, buf);
	ret = crypto_shash_update(shash, buf, 2);
	if (ret)
		goto out;
	/* Single SC_C byte as sent in the negotiate message */
	*buf = chap->sc_c;
	ret = crypto_shash_update(shash, buf, 1);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, "HostHost", 8);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
				  strlen(ctrl->opts->host->nqn));
	if (ret)
		goto out;
	/* NUL byte separating the two NQNs */
	memset(buf, 0, sizeof(buf));
	ret = crypto_shash_update(shash, buf, 1);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
			    strlen(ctrl->opts->subsysnqn));
	if (ret)
		goto out;
	ret = crypto_shash_final(shash, chap->response);
out:
	/* Only free if we switched to an augmented challenge buffer */
	if (challenge != chap->c1)
		kfree(challenge);
	return ret;
}
520 
/*
 * Compute the expected controller response into chap->response, for
 * comparison against the rval in AUTH_Success1.
 *
 * The HMAC (keyed with the transformed controller key) covers, in this
 * exact order: challenge c2 (augmented when a DH group is in use), s2
 * (LE32), transaction id (LE16), a zero byte, the literal "Controller",
 * the subsystem NQN, a NUL separator, and the host NQN.  The update
 * order is protocol-defined and must not be changed.
 *
 * Returns 0 on success or a negative errno.
 */
static int nvme_auth_dhchap_setup_ctrl_response(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
	struct nvme_dhchap_key *transformed_key;
	u8 buf[4], *challenge = chap->c2;
	int ret;

	transformed_key = nvme_auth_transform_key(ctrl->ctrl_key,
				ctrl->opts->subsysnqn);
	if (IS_ERR(transformed_key)) {
		ret = PTR_ERR(transformed_key);
		return ret;
	}

	ret = crypto_shash_setkey(chap->shash_tfm,
			transformed_key->key, transformed_key->len);
	if (ret) {
		dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
			 chap->qid, ret);
		goto out;
	}

	/* With a DH group, hash the augmented challenge instead of c2 */
	if (chap->dh_tfm) {
		challenge = kmalloc(chap->hash_len, GFP_KERNEL);
		if (!challenge) {
			ret = -ENOMEM;
			goto out;
		}
		ret = nvme_auth_augmented_challenge(chap->hash_id,
						    chap->sess_key,
						    chap->sess_key_len,
						    chap->c2, challenge,
						    chap->hash_len);
		if (ret)
			goto out;
	}
	dev_dbg(ctrl->device, "%s: qid %d ctrl response seq %u transaction %d\n",
		__func__, chap->qid, chap->s2, chap->transaction);
	dev_dbg(ctrl->device, "%s: qid %d challenge %*ph\n",
		__func__, chap->qid, (int)chap->hash_len, challenge);
	dev_dbg(ctrl->device, "%s: qid %d subsysnqn %s\n",
		__func__, chap->qid, ctrl->opts->subsysnqn);
	dev_dbg(ctrl->device, "%s: qid %d hostnqn %s\n",
		__func__, chap->qid, ctrl->opts->host->nqn);
	shash->tfm = chap->shash_tfm;
	ret = crypto_shash_init(shash);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, challenge, chap->hash_len);
	if (ret)
		goto out;
	put_unaligned_le32(chap->s2, buf);
	ret = crypto_shash_update(shash, buf, 4);
	if (ret)
		goto out;
	put_unaligned_le16(chap->transaction, buf);
	ret = crypto_shash_update(shash, buf, 2);
	if (ret)
		goto out;
	/* Single zero byte; buf is reused as NUL separator below as well */
	memset(buf, 0, 4);
	ret = crypto_shash_update(shash, buf, 1);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, "Controller", 10);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
				  strlen(ctrl->opts->subsysnqn));
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, buf, 1);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
				  strlen(ctrl->opts->host->nqn));
	if (ret)
		goto out;
	ret = crypto_shash_final(shash, chap->response);
out:
	/* Only free if we switched to an augmented challenge buffer */
	if (challenge != chap->c2)
		kfree(challenge);
	nvme_auth_free_key(transformed_key);
	return ret;
}
606 
/*
 * Perform the DH exchange: generate (or reuse) the host's ephemeral
 * keypair, then derive the shared session key from the controller's
 * public value received in the challenge.
 *
 * Returns 0 on success or a negative errno; on failure chap->status
 * carries the DH-HMAC-CHAP reason code to report back.
 */
static int nvme_auth_dhchap_exponential(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	int ret;

	/* Keypair from a previous negotiation is still usable */
	if (chap->host_key && chap->host_key_len) {
		dev_dbg(ctrl->device,
			"qid %d: reusing host key\n", chap->qid);
		goto gen_sesskey;
	}
	ret = nvme_auth_gen_privkey(chap->dh_tfm, chap->dhgroup_id);
	if (ret < 0) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return ret;
	}

	chap->host_key_len = crypto_kpp_maxsize(chap->dh_tfm);

	chap->host_key = kzalloc(chap->host_key_len, GFP_KERNEL);
	if (!chap->host_key) {
		chap->host_key_len = 0;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return -ENOMEM;
	}
	ret = nvme_auth_gen_pubkey(chap->dh_tfm,
				   chap->host_key, chap->host_key_len);
	if (ret) {
		dev_dbg(ctrl->device,
			"failed to generate public key, error %d\n", ret);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return ret;
	}

gen_sesskey:
	/* Session key has the same size as the DH public value */
	chap->sess_key_len = chap->host_key_len;
	chap->sess_key = kmalloc(chap->sess_key_len, GFP_KERNEL);
	if (!chap->sess_key) {
		chap->sess_key_len = 0;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return -ENOMEM;
	}

	ret = nvme_auth_gen_shared_secret(chap->dh_tfm,
					  chap->ctrl_key, chap->ctrl_key_len,
					  chap->sess_key, chap->sess_key_len);
	if (ret) {
		dev_dbg(ctrl->device,
			"failed to generate shared secret, error %d\n", ret);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return ret;
	}
	dev_dbg(ctrl->device, "shared secret %*ph\n",
		(int)chap->sess_key_len, chap->sess_key);
	return 0;
}
662 
/*
 * Wipe all per-negotiation state (keys, challenges, message buffer)
 * while keeping the crypto transforms, which may be reused by the next
 * negotiation.  Sensitive material is freed with kfree_sensitive().
 */
static void nvme_auth_reset_dhchap(struct nvme_dhchap_queue_context *chap)
{
	nvme_auth_free_key(chap->transformed_key);
	chap->transformed_key = NULL;
	kfree_sensitive(chap->host_key);
	chap->host_key = NULL;
	chap->host_key_len = 0;
	kfree_sensitive(chap->ctrl_key);
	chap->ctrl_key = NULL;
	chap->ctrl_key_len = 0;
	kfree_sensitive(chap->sess_key);
	chap->sess_key = NULL;
	chap->sess_key_len = 0;
	chap->status = 0;
	chap->error = 0;
	chap->s1 = 0;
	chap->s2 = 0;
	chap->bi_directional = false;
	chap->transaction = 0;
	memset(chap->c1, 0, sizeof(chap->c1));
	memset(chap->c2, 0, sizeof(chap->c2));
	mempool_free(chap->buf, nvme_chap_buf_pool);
	chap->buf = NULL;
}
687 
688 static void nvme_auth_free_dhchap(struct nvme_dhchap_queue_context *chap)
689 {
690 	nvme_auth_reset_dhchap(chap);
691 	chap->authenticated = false;
692 	if (chap->shash_tfm)
693 		crypto_free_shash(chap->shash_tfm);
694 	if (chap->dh_tfm)
695 		crypto_free_kpp(chap->dh_tfm);
696 }
697 
/*
 * Revoke and drop the generated TLS PSK held in the connect options,
 * e.g. when a new PSK is derived via secure concatenation.
 */
void nvme_auth_revoke_tls_key(struct nvme_ctrl *ctrl)
{
	dev_dbg(ctrl->device, "Wipe generated TLS PSK %08x\n",
		key_serial(ctrl->opts->tls_key));
	key_revoke(ctrl->opts->tls_key);
	key_put(ctrl->opts->tls_key);
	ctrl->opts->tls_key = NULL;
}
EXPORT_SYMBOL_GPL(nvme_auth_revoke_tls_key);
707 
/*
 * Secure channel concatenation: derive a TLS PSK from the DH session
 * key and the two challenges, and insert it into the keyring so the
 * transport can establish a TLS connection with it.  Only valid on the
 * admin queue (qid 0), and only after a DH session key was negotiated.
 *
 * Derivation chain: session key -> generated PSK -> digest ->
 * TLS PSK -> keyring entry; any previously generated TLS key is
 * revoked and replaced.
 */
static int nvme_auth_secure_concat(struct nvme_ctrl *ctrl,
				   struct nvme_dhchap_queue_context *chap)
{
	u8 *psk, *digest, *tls_psk;
	struct key *tls_key;
	size_t psk_len;
	int ret = 0;

	if (!chap->sess_key) {
		dev_warn(ctrl->device,
			 "%s: qid %d no session key negotiated\n",
			 __func__, chap->qid);
		return -ENOKEY;
	}

	if (chap->qid) {
		dev_warn(ctrl->device,
			 "qid %d: secure concatenation not supported on I/O queues\n",
			 chap->qid);
		return -EINVAL;
	}
	ret = nvme_auth_generate_psk(chap->hash_id, chap->sess_key,
				     chap->sess_key_len,
				     chap->c1, chap->c2,
				     chap->hash_len, &psk, &psk_len);
	if (ret) {
		dev_warn(ctrl->device,
			 "%s: qid %d failed to generate PSK, error %d\n",
			 __func__, chap->qid, ret);
		return ret;
	}
	dev_dbg(ctrl->device,
		  "%s: generated psk %*ph\n", __func__, (int)psk_len, psk);

	ret = nvme_auth_generate_digest(chap->hash_id, psk, psk_len,
					ctrl->opts->subsysnqn,
					ctrl->opts->host->nqn, &digest);
	if (ret) {
		dev_warn(ctrl->device,
			 "%s: qid %d failed to generate digest, error %d\n",
			 __func__, chap->qid, ret);
		goto out_free_psk;
	}
	dev_dbg(ctrl->device, "%s: generated digest %s\n",
		 __func__, digest);
	ret = nvme_auth_derive_tls_psk(chap->hash_id, psk, psk_len,
				       digest, &tls_psk);
	if (ret) {
		dev_warn(ctrl->device,
			 "%s: qid %d failed to derive TLS psk, error %d\n",
			 __func__, chap->qid, ret);
		goto out_free_digest;
	}

	tls_key = nvme_tls_psk_refresh(ctrl->opts->keyring,
				       ctrl->opts->host->nqn,
				       ctrl->opts->subsysnqn, chap->hash_id,
				       tls_psk, psk_len, digest);
	if (IS_ERR(tls_key)) {
		ret = PTR_ERR(tls_key);
		dev_warn(ctrl->device,
			 "%s: qid %d failed to insert generated key, error %d\n",
			 __func__, chap->qid, ret);
		/* Fall through: the old key is still replaced by NULL below */
		tls_key = NULL;
	}
	kfree_sensitive(tls_psk);
	if (ctrl->opts->tls_key)
		nvme_auth_revoke_tls_key(ctrl);
	ctrl->opts->tls_key = tls_key;
out_free_digest:
	kfree_sensitive(digest);
out_free_psk:
	kfree_sensitive(psk);
	return ret;
}
783 
/*
 * Work item driving the full DH-HMAC-CHAP negotiation for one queue:
 *   negotiate -> challenge -> reply -> success1 [-> success2].
 *
 * The outcome is stored in chap->error (0 on success); on protocol
 * failures an AUTH_Failure2 message is sent via the fail2 path.  Note
 * that transport errors while *receiving* a message return without
 * sending failure2, since the exchange is already broken.
 */
static void nvme_queue_auth_work(struct work_struct *work)
{
	struct nvme_dhchap_queue_context *chap =
		container_of(work, struct nvme_dhchap_queue_context, auth_work);
	struct nvme_ctrl *ctrl = chap->ctrl;
	size_t tl;
	int ret = 0;

	/*
	 * Allocate a large enough buffer for the entire negotiation:
	 * 4k is enough to ffdhe8192.
	 */
	chap->buf = mempool_alloc(nvme_chap_buf_pool, GFP_KERNEL);
	if (!chap->buf) {
		chap->error = -ENOMEM;
		return;
	}

	chap->transaction = ctrl->transaction++;

	/* DH-HMAC-CHAP Step 1: send negotiate */
	dev_dbg(ctrl->device, "%s: qid %d send negotiate\n",
		__func__, chap->qid);
	ret = nvme_auth_set_dhchap_negotiate_data(ctrl, chap);
	if (ret < 0) {
		chap->error = ret;
		return;
	}
	tl = ret;
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
	if (ret) {
		chap->error = ret;
		return;
	}

	/* DH-HMAC-CHAP Step 2: receive challenge */
	dev_dbg(ctrl->device, "%s: qid %d receive challenge\n",
		__func__, chap->qid);

	memset(chap->buf, 0, CHAP_BUF_SIZE);
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, CHAP_BUF_SIZE,
			       false);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid %d failed to receive challenge, %s %d\n",
			 chap->qid, ret < 0 ? "error" : "nvme status", ret);
		chap->error = ret;
		return;
	}
	ret = nvme_auth_receive_validate(ctrl, chap->qid, chap->buf, chap->transaction,
					 NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE);
	if (ret) {
		chap->status = ret;
		chap->error = -EKEYREJECTED;
		return;
	}

	ret = nvme_auth_process_dhchap_challenge(ctrl, chap);
	if (ret) {
		/* Invalid challenge parameters */
		chap->error = ret;
		goto fail2;
	}

	/* Controller sent a DH public value: run the DH exchange */
	if (chap->ctrl_key_len) {
		dev_dbg(ctrl->device,
			"%s: qid %d DH exponential\n",
			__func__, chap->qid);
		ret = nvme_auth_dhchap_exponential(ctrl, chap);
		if (ret) {
			chap->error = ret;
			goto fail2;
		}
	}

	dev_dbg(ctrl->device, "%s: qid %d host response\n",
		__func__, chap->qid);
	/* Serialize against key updates on the controller */
	mutex_lock(&ctrl->dhchap_auth_mutex);
	ret = nvme_auth_dhchap_setup_host_response(ctrl, chap);
	mutex_unlock(&ctrl->dhchap_auth_mutex);
	if (ret) {
		chap->error = ret;
		goto fail2;
	}

	/* DH-HMAC-CHAP Step 3: send reply */
	dev_dbg(ctrl->device, "%s: qid %d send reply\n",
		__func__, chap->qid);
	ret = nvme_auth_set_dhchap_reply_data(ctrl, chap);
	if (ret < 0) {
		chap->error = ret;
		goto fail2;
	}

	tl = ret;
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
	if (ret) {
		chap->error = ret;
		goto fail2;
	}

	/* DH-HMAC-CHAP Step 4: receive success1 */
	dev_dbg(ctrl->device, "%s: qid %d receive success1\n",
		__func__, chap->qid);

	memset(chap->buf, 0, CHAP_BUF_SIZE);
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, CHAP_BUF_SIZE,
			       false);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid %d failed to receive success1, %s %d\n",
			 chap->qid, ret < 0 ? "error" : "nvme status", ret);
		chap->error = ret;
		return;
	}
	ret = nvme_auth_receive_validate(ctrl, chap->qid,
					 chap->buf, chap->transaction,
					 NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1);
	if (ret) {
		chap->status = ret;
		chap->error = -EKEYREJECTED;
		return;
	}

	/* Compute the expected controller response before checking rval */
	mutex_lock(&ctrl->dhchap_auth_mutex);
	if (ctrl->ctrl_key) {
		dev_dbg(ctrl->device,
			"%s: qid %d controller response\n",
			__func__, chap->qid);
		ret = nvme_auth_dhchap_setup_ctrl_response(ctrl, chap);
		if (ret) {
			mutex_unlock(&ctrl->dhchap_auth_mutex);
			chap->error = ret;
			goto fail2;
		}
	}
	mutex_unlock(&ctrl->dhchap_auth_mutex);

	ret = nvme_auth_process_dhchap_success1(ctrl, chap);
	if (ret) {
		/* Controller authentication failed */
		chap->error = -EKEYREJECTED;
		goto fail2;
	}

	if (chap->bi_directional) {
		/* DH-HMAC-CHAP Step 5: send success2 */
		dev_dbg(ctrl->device, "%s: qid %d send success2\n",
			__func__, chap->qid);
		tl = nvme_auth_set_dhchap_success2_data(ctrl, chap);
		ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
		if (ret)
			chap->error = ret;
	}
	if (!ret) {
		chap->error = 0;
		chap->authenticated = true;
		/* For concatenation, now derive and install the TLS PSK */
		if (ctrl->opts->concat &&
		    (ret = nvme_auth_secure_concat(ctrl, chap))) {
			dev_warn(ctrl->device,
				 "%s: qid %d failed to enable secure concatenation\n",
				 __func__, chap->qid);
			chap->error = ret;
			chap->authenticated = false;
		}
		return;
	}

fail2:
	if (chap->status == 0)
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
	dev_dbg(ctrl->device, "%s: qid %d send failure2, status %x\n",
		__func__, chap->qid, chap->status);
	tl = nvme_auth_set_dhchap_failure2_data(ctrl, chap);
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
	/*
	 * only update error if send failure2 failed and no other
	 * error had been set during authentication.
	 */
	if (ret && !chap->error)
		chap->error = ret;
}
966 
967 int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
968 {
969 	struct nvme_dhchap_queue_context *chap;
970 
971 	if (!ctrl->host_key) {
972 		dev_warn(ctrl->device, "qid %d: no key\n", qid);
973 		return -ENOKEY;
974 	}
975 
976 	if (ctrl->opts->dhchap_ctrl_secret && !ctrl->ctrl_key) {
977 		dev_warn(ctrl->device, "qid %d: invalid ctrl key\n", qid);
978 		return -ENOKEY;
979 	}
980 
981 	chap = &ctrl->dhchap_ctxs[qid];
982 	cancel_work_sync(&chap->auth_work);
983 	queue_work(nvme_auth_wq, &chap->auth_work);
984 	return 0;
985 }
986 EXPORT_SYMBOL_GPL(nvme_auth_negotiate);
987 
988 int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
989 {
990 	struct nvme_dhchap_queue_context *chap;
991 	int ret;
992 
993 	chap = &ctrl->dhchap_ctxs[qid];
994 	flush_work(&chap->auth_work);
995 	ret = chap->error;
996 	/* clear sensitive info */
997 	nvme_auth_reset_dhchap(chap);
998 	return ret;
999 }
1000 EXPORT_SYMBOL_GPL(nvme_auth_wait);
1001 
/*
 * Controller-level re-authentication work: authenticate the admin
 * queue first, then (except for secure concatenation) re-run
 * authentication on every I/O queue that had been authenticated
 * before.  I/O queue failures are logged but not fatal.
 */
static void nvme_ctrl_auth_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, dhchap_auth_work);
	int ret, q;

	/*
	 * If the ctrl is not connected, bail as reconnect will handle
	 * authentication.
	 */
	if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
		return;

	/* Authenticate admin queue first */
	ret = nvme_auth_negotiate(ctrl, 0);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid 0: error %d setting up authentication\n", ret);
		return;
	}
	ret = nvme_auth_wait(ctrl, 0);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid 0: authentication failed\n");
		return;
	}
	/*
	 * Only run authentication on the admin queue for secure concatenation.
	 */
	if (ctrl->opts->concat)
		return;

	/* First pass: queue the work for all eligible I/O queues ... */
	for (q = 1; q < ctrl->queue_count; q++) {
		struct nvme_dhchap_queue_context *chap =
			&ctrl->dhchap_ctxs[q];
		/*
		 * Skip re-authentication if the queue had
		 * not been authenticated initially.
		 */
		if (!chap->authenticated)
			continue;
		cancel_work_sync(&chap->auth_work);
		queue_work(nvme_auth_wq, &chap->auth_work);
	}

	/*
	 * Failure is a soft-state; credentials remain valid until
	 * the controller terminates the connection.
	 */
	/* ... second pass: collect the results in parallel */
	for (q = 1; q < ctrl->queue_count; q++) {
		struct nvme_dhchap_queue_context *chap =
			&ctrl->dhchap_ctxs[q];
		if (!chap->authenticated)
			continue;
		flush_work(&chap->auth_work);
		ret = chap->error;
		nvme_auth_reset_dhchap(chap);
		if (ret)
			dev_warn(ctrl->device,
				 "qid %d: authentication failed\n", q);
	}
}
1064 
1065 int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
1066 {
1067 	struct nvme_dhchap_queue_context *chap;
1068 	int i, ret;
1069 
1070 	mutex_init(&ctrl->dhchap_auth_mutex);
1071 	INIT_WORK(&ctrl->dhchap_auth_work, nvme_ctrl_auth_work);
1072 	if (!ctrl->opts)
1073 		return 0;
1074 	ret = nvme_auth_generate_key(ctrl->opts->dhchap_secret,
1075 			&ctrl->host_key);
1076 	if (ret)
1077 		return ret;
1078 	ret = nvme_auth_generate_key(ctrl->opts->dhchap_ctrl_secret,
1079 			&ctrl->ctrl_key);
1080 	if (ret)
1081 		goto err_free_dhchap_secret;
1082 
1083 	if (!ctrl->opts->dhchap_secret && !ctrl->opts->dhchap_ctrl_secret)
1084 		return 0;
1085 
1086 	ctrl->dhchap_ctxs = kvcalloc(ctrl_max_dhchaps(ctrl),
1087 				sizeof(*chap), GFP_KERNEL);
1088 	if (!ctrl->dhchap_ctxs) {
1089 		ret = -ENOMEM;
1090 		goto err_free_dhchap_ctrl_secret;
1091 	}
1092 
1093 	for (i = 0; i < ctrl_max_dhchaps(ctrl); i++) {
1094 		chap = &ctrl->dhchap_ctxs[i];
1095 		chap->qid = i;
1096 		chap->ctrl = ctrl;
1097 		chap->authenticated = false;
1098 		INIT_WORK(&chap->auth_work, nvme_queue_auth_work);
1099 	}
1100 
1101 	return 0;
1102 err_free_dhchap_ctrl_secret:
1103 	nvme_auth_free_key(ctrl->ctrl_key);
1104 	ctrl->ctrl_key = NULL;
1105 err_free_dhchap_secret:
1106 	nvme_auth_free_key(ctrl->host_key);
1107 	ctrl->host_key = NULL;
1108 	return ret;
1109 }
1110 EXPORT_SYMBOL_GPL(nvme_auth_init_ctrl);
1111 
/*
 * Cancel any pending controller-wide re-authentication run, waiting
 * for a currently executing worker to finish.  Per-queue auth workers
 * are not touched here.
 */
void nvme_auth_stop(struct nvme_ctrl *ctrl)
{
	cancel_work_sync(&ctrl->dhchap_auth_work);
}
EXPORT_SYMBOL_GPL(nvme_auth_stop);
1117 
1118 void nvme_auth_free(struct nvme_ctrl *ctrl)
1119 {
1120 	int i;
1121 
1122 	if (ctrl->dhchap_ctxs) {
1123 		for (i = 0; i < ctrl_max_dhchaps(ctrl); i++)
1124 			nvme_auth_free_dhchap(&ctrl->dhchap_ctxs[i]);
1125 		kfree(ctrl->dhchap_ctxs);
1126 	}
1127 	if (ctrl->host_key) {
1128 		nvme_auth_free_key(ctrl->host_key);
1129 		ctrl->host_key = NULL;
1130 	}
1131 	if (ctrl->ctrl_key) {
1132 		nvme_auth_free_key(ctrl->ctrl_key);
1133 		ctrl->ctrl_key = NULL;
1134 	}
1135 }
1136 EXPORT_SYMBOL_GPL(nvme_auth_free);
1137 
1138 int __init nvme_init_auth(void)
1139 {
1140 	nvme_auth_wq = alloc_workqueue("nvme-auth-wq",
1141 			       WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
1142 	if (!nvme_auth_wq)
1143 		return -ENOMEM;
1144 
1145 	nvme_chap_buf_cache = kmem_cache_create("nvme-chap-buf-cache",
1146 				CHAP_BUF_SIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
1147 	if (!nvme_chap_buf_cache)
1148 		goto err_destroy_workqueue;
1149 
1150 	nvme_chap_buf_pool = mempool_create(16, mempool_alloc_slab,
1151 			mempool_free_slab, nvme_chap_buf_cache);
1152 	if (!nvme_chap_buf_pool)
1153 		goto err_destroy_chap_buf_cache;
1154 
1155 	return 0;
1156 err_destroy_chap_buf_cache:
1157 	kmem_cache_destroy(nvme_chap_buf_cache);
1158 err_destroy_workqueue:
1159 	destroy_workqueue(nvme_auth_wq);
1160 	return -ENOMEM;
1161 }
1162 
/*
 * Module teardown: release resources in reverse order of creation.
 * The mempool is destroyed before the backing slab cache, since the
 * pool's pre-allocated elements come from that cache.
 */
void __exit nvme_exit_auth(void)
{
	mempool_destroy(nvme_chap_buf_pool);
	kmem_cache_destroy(nvme_chap_buf_cache);
	destroy_workqueue(nvme_auth_wq);
}
1169