xref: /linux/drivers/nvme/host/auth.c (revision 07fdad3a93756b872da7b53647715c48d0f4a2d0)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2020 Hannes Reinecke, SUSE Linux
4  */
5 
6 #include <linux/crc32.h>
7 #include <linux/base64.h>
8 #include <linux/prandom.h>
9 #include <linux/unaligned.h>
10 #include <crypto/hash.h>
11 #include <crypto/dh.h>
12 #include "nvme.h"
13 #include "fabrics.h"
14 #include <linux/nvme-auth.h>
15 #include <linux/nvme-keyring.h>
16 
17 #define CHAP_BUF_SIZE 4096
18 static struct kmem_cache *nvme_chap_buf_cache;
19 static mempool_t *nvme_chap_buf_pool;
20 
21 struct nvme_dhchap_queue_context {
22 	struct list_head entry;
23 	struct work_struct auth_work;
24 	struct nvme_ctrl *ctrl;
25 	struct crypto_shash *shash_tfm;
26 	struct crypto_kpp *dh_tfm;
27 	struct nvme_dhchap_key *transformed_key;
28 	void *buf;
29 	int qid;
30 	int error;
31 	u32 s1;
32 	u32 s2;
33 	bool bi_directional;
34 	bool authenticated;
35 	u16 transaction;
36 	u8 status;
37 	u8 dhgroup_id;
38 	u8 hash_id;
39 	size_t hash_len;
40 	u8 c1[64];
41 	u8 c2[64];
42 	u8 response[64];
43 	u8 *ctrl_key;
44 	u8 *host_key;
45 	u8 *sess_key;
46 	int ctrl_key_len;
47 	int host_key_len;
48 	int sess_key_len;
49 };
50 
51 static struct workqueue_struct *nvme_auth_wq;
52 
53 static inline int ctrl_max_dhchaps(struct nvme_ctrl *ctrl)
54 {
55 	return ctrl->opts->nr_io_queues + ctrl->opts->nr_write_queues +
56 			ctrl->opts->nr_poll_queues + 1;
57 }
58 
59 static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
60 			    void *data, size_t data_len, bool auth_send)
61 {
62 	struct nvme_command cmd = {};
63 	nvme_submit_flags_t flags = NVME_SUBMIT_RETRY;
64 	struct request_queue *q = ctrl->fabrics_q;
65 	int ret;
66 
67 	if (qid != 0) {
68 		flags |= NVME_SUBMIT_NOWAIT | NVME_SUBMIT_RESERVED;
69 		q = ctrl->connect_q;
70 	}
71 
72 	cmd.auth_common.opcode = nvme_fabrics_command;
73 	cmd.auth_common.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER;
74 	cmd.auth_common.spsp0 = 0x01;
75 	cmd.auth_common.spsp1 = 0x01;
76 	if (auth_send) {
77 		cmd.auth_send.fctype = nvme_fabrics_type_auth_send;
78 		cmd.auth_send.tl = cpu_to_le32(data_len);
79 	} else {
80 		cmd.auth_receive.fctype = nvme_fabrics_type_auth_receive;
81 		cmd.auth_receive.al = cpu_to_le32(data_len);
82 	}
83 
84 	ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, data_len,
85 				     qid == 0 ? NVME_QID_ANY : qid, flags);
86 	if (ret > 0)
87 		dev_warn(ctrl->device,
88 			"qid %d auth_send failed with status %d\n", qid, ret);
89 	else if (ret < 0)
90 		dev_err(ctrl->device,
91 			"qid %d auth_send failed with error %d\n", qid, ret);
92 	return ret;
93 }
94 
95 static int nvme_auth_receive_validate(struct nvme_ctrl *ctrl, int qid,
96 		struct nvmf_auth_dhchap_failure_data *data,
97 		u16 transaction, u8 expected_msg)
98 {
99 	dev_dbg(ctrl->device, "%s: qid %d auth_type %d auth_id %x\n",
100 		__func__, qid, data->auth_type, data->auth_id);
101 
102 	if (data->auth_type == NVME_AUTH_COMMON_MESSAGES &&
103 	    data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
104 		return data->rescode_exp;
105 	}
106 	if (data->auth_type != NVME_AUTH_DHCHAP_MESSAGES ||
107 	    data->auth_id != expected_msg) {
108 		dev_warn(ctrl->device,
109 			 "qid %d invalid message %02x/%02x\n",
110 			 qid, data->auth_type, data->auth_id);
111 		return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
112 	}
113 	if (le16_to_cpu(data->t_id) != transaction) {
114 		dev_warn(ctrl->device,
115 			 "qid %d invalid transaction ID %d\n",
116 			 qid, le16_to_cpu(data->t_id));
117 		return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
118 	}
119 	return 0;
120 }
121 
/*
 * Build the AUTH_Negotiate message in chap->buf, advertising the hash
 * and DH group algorithms this host supports.
 *
 * Returns the number of bytes to transfer, or -EINVAL (with
 * chap->status set) if the message would not fit in the 4k buffer.
 */
static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_negotiate_data *data = chap->buf;
	size_t size = sizeof(*data) + sizeof(union nvmf_auth_protocol);

	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}
	memset((u8 *)chap->buf, 0, size);
	data->auth_type = NVME_AUTH_COMMON_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
	data->t_id = cpu_to_le16(chap->transaction);
	/*
	 * Secure concatenation is only negotiated on the admin queue;
	 * request a PSK replacement if a generated TLS key already exists,
	 * otherwise ask for a new one.
	 */
	if (ctrl->opts->concat && chap->qid == 0) {
		if (ctrl->opts->tls_key)
			data->sc_c = NVME_AUTH_SECP_REPLACETLSPSK;
		else
			data->sc_c = NVME_AUTH_SECP_NEWTLSPSK;
	} else
		data->sc_c = NVME_AUTH_SECP_NOSC;
	data->napd = 1;
	data->auth_protocol[0].dhchap.authid = NVME_AUTH_DHCHAP_AUTH_ID;
	/* 3 hash identifiers and 6 DH group identifiers follow */
	data->auth_protocol[0].dhchap.halen = 3;
	data->auth_protocol[0].dhchap.dhlen = 6;
	data->auth_protocol[0].dhchap.idlist[0] = NVME_AUTH_HASH_SHA256;
	data->auth_protocol[0].dhchap.idlist[1] = NVME_AUTH_HASH_SHA384;
	data->auth_protocol[0].dhchap.idlist[2] = NVME_AUTH_HASH_SHA512;
	/*
	 * The DH group list starts at offset 30 of idlist; presumably this
	 * fixed offset matches the on-wire layout of the descriptor —
	 * TODO confirm against the nvmf_auth_protocol definition.
	 */
	data->auth_protocol[0].dhchap.idlist[30] = NVME_AUTH_DHGROUP_NULL;
	data->auth_protocol[0].dhchap.idlist[31] = NVME_AUTH_DHGROUP_2048;
	data->auth_protocol[0].dhchap.idlist[32] = NVME_AUTH_DHGROUP_3072;
	data->auth_protocol[0].dhchap.idlist[33] = NVME_AUTH_DHGROUP_4096;
	data->auth_protocol[0].dhchap.idlist[34] = NVME_AUTH_DHGROUP_6144;
	data->auth_protocol[0].dhchap.idlist[35] = NVME_AUTH_DHGROUP_8192;

	return size;
}
159 
/*
 * Parse the AUTH_Challenge message in chap->buf: select (or reuse) the
 * HMAC and KPP transforms matching the controller's chosen hash and DH
 * group, and stash the challenge value and, if present, the controller's
 * DH public value.
 *
 * On error, chap->status is set to the DH-HMAC-CHAP failure reason to
 * report back and a negative errno is returned.
 */
static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_challenge_data *data = chap->buf;
	u16 dhvlen = le16_to_cpu(data->dhvlen);
	size_t size = sizeof(*data) + data->hl + dhvlen;
	const char *gid_name = nvme_auth_dhgroup_name(data->dhgid);
	const char *hmac_name, *kpp_name;

	/* Reject lengths that would run past the 4k receive buffer */
	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}

	hmac_name = nvme_auth_hmac_name(data->hashid);
	if (!hmac_name) {
		dev_warn(ctrl->device,
			 "qid %d: invalid HASH ID %d\n",
			 chap->qid, data->hashid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		return -EPROTO;
	}

	/*
	 * Reuse the existing shash transform if the controller picked the
	 * same hash (by id, algorithm name and digest size) as last time.
	 */
	if (chap->hash_id == data->hashid && chap->shash_tfm &&
	    !strcmp(crypto_shash_alg_name(chap->shash_tfm), hmac_name) &&
	    crypto_shash_digestsize(chap->shash_tfm) == data->hl) {
		dev_dbg(ctrl->device,
			"qid %d: reuse existing hash %s\n",
			chap->qid, hmac_name);
		goto select_kpp;
	}

	/* Reset if hash cannot be reused */
	if (chap->shash_tfm) {
		crypto_free_shash(chap->shash_tfm);
		chap->hash_id = 0;
		chap->hash_len = 0;
	}
	chap->shash_tfm = crypto_alloc_shash(hmac_name, 0,
					     CRYPTO_ALG_ALLOCATES_MEMORY);
	if (IS_ERR(chap->shash_tfm)) {
		dev_warn(ctrl->device,
			 "qid %d: failed to allocate hash %s, error %ld\n",
			 chap->qid, hmac_name, PTR_ERR(chap->shash_tfm));
		chap->shash_tfm = NULL;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return -ENOMEM;
	}

	/* The advertised hash length must match the algorithm's digest size */
	if (crypto_shash_digestsize(chap->shash_tfm) != data->hl) {
		dev_warn(ctrl->device,
			 "qid %d: invalid hash length %d\n",
			 chap->qid, data->hl);
		crypto_free_shash(chap->shash_tfm);
		chap->shash_tfm = NULL;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		return -EPROTO;
	}

	chap->hash_id = data->hashid;
	chap->hash_len = data->hl;
	dev_dbg(ctrl->device, "qid %d: selected hash %s\n",
		chap->qid, hmac_name);

select_kpp:
	kpp_name = nvme_auth_dhgroup_kpp(data->dhgid);
	if (!kpp_name) {
		dev_warn(ctrl->device,
			 "qid %d: invalid DH group id %d\n",
			 chap->qid, data->dhgid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
		/* Leave previous dh_tfm intact */
		return -EPROTO;
	}

	/*
	 * Reuse the DH transform when the group is unchanged; the NULL
	 * group needs no transform at all.
	 */
	if (chap->dhgroup_id == data->dhgid &&
	    (data->dhgid == NVME_AUTH_DHGROUP_NULL || chap->dh_tfm)) {
		dev_dbg(ctrl->device,
			"qid %d: reuse existing DH group %s\n",
			chap->qid, gid_name);
		goto skip_kpp;
	}

	/* Reset dh_tfm if it can't be reused */
	if (chap->dh_tfm) {
		crypto_free_kpp(chap->dh_tfm);
		chap->dh_tfm = NULL;
	}

	if (data->dhgid != NVME_AUTH_DHGROUP_NULL) {
		/* A non-NULL group requires a DH public value */
		if (dhvlen == 0) {
			dev_warn(ctrl->device,
				 "qid %d: empty DH value\n",
				 chap->qid);
			chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
			return -EPROTO;
		}

		chap->dh_tfm = crypto_alloc_kpp(kpp_name, 0, 0);
		if (IS_ERR(chap->dh_tfm)) {
			int ret = PTR_ERR(chap->dh_tfm);

			dev_warn(ctrl->device,
				 "qid %d: error %d initializing DH group %s\n",
				 chap->qid, ret, gid_name);
			chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
			chap->dh_tfm = NULL;
			return ret;
		}
		dev_dbg(ctrl->device, "qid %d: selected DH group %s\n",
			chap->qid, gid_name);
	} else if (dhvlen != 0) {
		/* Conversely, NULL DH must not carry a DH value */
		dev_warn(ctrl->device,
			 "qid %d: invalid DH value for NULL DH\n",
			 chap->qid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EPROTO;
	}
	chap->dhgroup_id = data->dhgid;

skip_kpp:
	chap->s1 = le32_to_cpu(data->seqnum);
	/* Challenge value precedes the DH value in cval[] */
	memcpy(chap->c1, data->cval, chap->hash_len);
	if (dhvlen) {
		chap->ctrl_key = kmalloc(dhvlen, GFP_KERNEL);
		if (!chap->ctrl_key) {
			chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
			return -ENOMEM;
		}
		chap->ctrl_key_len = dhvlen;
		memcpy(chap->ctrl_key, data->cval + chap->hash_len,
		       dhvlen);
		dev_dbg(ctrl->device, "ctrl public key %*ph\n",
			 (int)chap->ctrl_key_len, chap->ctrl_key);
	}

	return 0;
}
298 
/*
 * Build the AUTH_Reply message in chap->buf.  The payload carries,
 * in order: the host response, an optional controller challenge c2
 * (when bidirectional authentication or secure concatenation is
 * requested), and the optional host DH public value.
 *
 * Returns the number of bytes to transfer, or -EINVAL (with
 * chap->status set) if it would not fit in the buffer.
 */
static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_reply_data *data = chap->buf;
	size_t size = sizeof(*data);

	/* Room for the host response and the controller challenge slot */
	size += 2 * chap->hash_len;

	if (chap->host_key_len)
		size += chap->host_key_len;

	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}

	memset(chap->buf, 0, size);
	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
	data->t_id = cpu_to_le16(chap->transaction);
	data->hl = chap->hash_len;
	data->dhvlen = cpu_to_le16(chap->host_key_len);
	memcpy(data->rval, chap->response, chap->hash_len);
	/* A controller key means we expect the controller to authenticate */
	if (ctrl->ctrl_key)
		chap->bi_directional = true;
	if (ctrl->ctrl_key || ctrl->opts->concat) {
		/* Generate a fresh challenge for the controller response */
		get_random_bytes(chap->c2, chap->hash_len);
		data->cvalid = 1;
		memcpy(data->rval + chap->hash_len, chap->c2,
		       chap->hash_len);
		dev_dbg(ctrl->device, "%s: qid %d ctrl challenge %*ph\n",
			__func__, chap->qid, (int)chap->hash_len, chap->c2);
	} else {
		memset(chap->c2, 0, chap->hash_len);
	}
	/*
	 * Secure concatenation uses sequence number 0 and is not treated
	 * as bidirectional authentication.
	 */
	if (ctrl->opts->concat) {
		chap->s2 = 0;
		chap->bi_directional = false;
	} else
		chap->s2 = nvme_auth_get_seqnum();
	data->seqnum = cpu_to_le32(chap->s2);
	if (chap->host_key_len) {
		dev_dbg(ctrl->device, "%s: qid %d host public key %*ph\n",
			__func__, chap->qid,
			chap->host_key_len, chap->host_key);
		/* DH value follows response and challenge in rval[] */
		memcpy(data->rval + 2 * chap->hash_len, chap->host_key,
		       chap->host_key_len);
	}

	return size;
}
350 
/*
 * Process the AUTH_Success1 message: validate the hash length and, if a
 * controller response is present (rvalid), compare it against the
 * expected response previously computed into chap->response.
 *
 * Returns 0 on success, -EINVAL/-EPROTO on malformed input, or
 * -ECONNREFUSED (with chap->status set) if the controller response
 * does not match.
 */
static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_success1_data *data = chap->buf;
	size_t size = sizeof(*data) + chap->hash_len;

	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}

	if (data->hl != chap->hash_len) {
		dev_warn(ctrl->device,
			 "qid %d: invalid hash length %u\n",
			 chap->qid, data->hl);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		return -EPROTO;
	}

	/* Just print out information for the admin queue */
	if (chap->qid == 0)
		dev_info(ctrl->device,
			 "qid 0: authenticated with hash %s dhgroup %s\n",
			 nvme_auth_hmac_name(chap->hash_id),
			 nvme_auth_dhgroup_name(chap->dhgroup_id));

	/* No controller response to verify; host-side auth is complete */
	if (!data->rvalid)
		return 0;

	/* Validate controller response */
	if (memcmp(chap->response, data->rval, data->hl)) {
		dev_dbg(ctrl->device, "%s: qid %d ctrl response %*ph\n",
			__func__, chap->qid, (int)chap->hash_len, data->rval);
		dev_dbg(ctrl->device, "%s: qid %d host response %*ph\n",
			__func__, chap->qid, (int)chap->hash_len,
			chap->response);
		dev_warn(ctrl->device,
			 "qid %d: controller authentication failed\n",
			 chap->qid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return -ECONNREFUSED;
	}

	/* Just print out information for the admin queue */
	if (chap->qid == 0)
		dev_info(ctrl->device,
			 "qid 0: controller authenticated\n");
	return 0;
}
400 
401 static int nvme_auth_set_dhchap_success2_data(struct nvme_ctrl *ctrl,
402 		struct nvme_dhchap_queue_context *chap)
403 {
404 	struct nvmf_auth_dhchap_success2_data *data = chap->buf;
405 	size_t size = sizeof(*data);
406 
407 	memset(chap->buf, 0, size);
408 	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
409 	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
410 	data->t_id = cpu_to_le16(chap->transaction);
411 
412 	return size;
413 }
414 
415 static int nvme_auth_set_dhchap_failure2_data(struct nvme_ctrl *ctrl,
416 		struct nvme_dhchap_queue_context *chap)
417 {
418 	struct nvmf_auth_dhchap_failure_data *data = chap->buf;
419 	size_t size = sizeof(*data);
420 
421 	memset(chap->buf, 0, size);
422 	data->auth_type = NVME_AUTH_COMMON_MESSAGES;
423 	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
424 	data->t_id = cpu_to_le16(chap->transaction);
425 	data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
426 	data->rescode_exp = chap->status;
427 
428 	return size;
429 }
430 
/*
 * Compute the host response into chap->response.
 *
 * The keyed HMAC (keyed with the transformed host key) is computed over,
 * in order: the (possibly DH-augmented) challenge, the 32-bit sequence
 * number s1 (LE), the 16-bit transaction id (LE), a zero byte, the
 * string "HostHost", the host NQN, a zero byte, and the subsystem NQN.
 *
 * When a DH transform is active the challenge is first augmented with
 * the DH session key via nvme_auth_augmented_challenge().
 *
 * Returns 0 on success or a negative errno.
 */
static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
	u8 buf[4], *challenge = chap->c1;
	int ret;

	dev_dbg(ctrl->device, "%s: qid %d host response seq %u transaction %d\n",
		__func__, chap->qid, chap->s1, chap->transaction);

	/* The transformed host key is cached across re-authentications */
	if (!chap->transformed_key) {
		chap->transformed_key = nvme_auth_transform_key(ctrl->host_key,
						ctrl->opts->host->nqn);
		if (IS_ERR(chap->transformed_key)) {
			ret = PTR_ERR(chap->transformed_key);
			chap->transformed_key = NULL;
			return ret;
		}
	} else {
		dev_dbg(ctrl->device, "%s: qid %d re-using host response\n",
			__func__, chap->qid);
	}

	ret = crypto_shash_setkey(chap->shash_tfm,
			chap->transformed_key->key, chap->transformed_key->len);
	if (ret) {
		dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
			 chap->qid, ret);
		goto out;
	}

	/* With DH, hash the augmented challenge instead of c1 directly */
	if (chap->dh_tfm) {
		challenge = kmalloc(chap->hash_len, GFP_KERNEL);
		if (!challenge) {
			ret = -ENOMEM;
			goto out;
		}
		ret = nvme_auth_augmented_challenge(chap->hash_id,
						    chap->sess_key,
						    chap->sess_key_len,
						    chap->c1, challenge,
						    chap->hash_len);
		if (ret)
			goto out;
	}

	shash->tfm = chap->shash_tfm;
	ret = crypto_shash_init(shash);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, challenge, chap->hash_len);
	if (ret)
		goto out;
	put_unaligned_le32(chap->s1, buf);
	ret = crypto_shash_update(shash, buf, 4);
	if (ret)
		goto out;
	put_unaligned_le16(chap->transaction, buf);
	ret = crypto_shash_update(shash, buf, 2);
	if (ret)
		goto out;
	/* Single zero byte separator */
	memset(buf, 0, sizeof(buf));
	ret = crypto_shash_update(shash, buf, 1);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, "HostHost", 8);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
				  strlen(ctrl->opts->host->nqn));
	if (ret)
		goto out;
	/* Another zero byte separator between the two NQNs */
	ret = crypto_shash_update(shash, buf, 1);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
			    strlen(ctrl->opts->subsysnqn));
	if (ret)
		goto out;
	ret = crypto_shash_final(shash, chap->response);
out:
	/* Free the augmented challenge only if one was allocated */
	if (challenge != chap->c1)
		kfree(challenge);
	return ret;
}
516 
/*
 * Compute the expected controller response into chap->response so it
 * can be compared against the value the controller sends in Success1.
 *
 * Mirrors nvme_auth_dhchap_setup_host_response() but keyed with the
 * transformed controller key and hashed over: the (possibly augmented)
 * challenge c2, sequence number s2 (LE32), transaction id (LE16), a
 * zero byte, the string "Controller", the subsystem NQN, a zero byte,
 * and the host NQN.
 *
 * Returns 0 on success or a negative errno.
 */
static int nvme_auth_dhchap_setup_ctrl_response(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
	struct nvme_dhchap_key *transformed_key;
	u8 buf[4], *challenge = chap->c2;
	int ret;

	/* Unlike the host key, this transform is not cached */
	transformed_key = nvme_auth_transform_key(ctrl->ctrl_key,
				ctrl->opts->subsysnqn);
	if (IS_ERR(transformed_key)) {
		ret = PTR_ERR(transformed_key);
		return ret;
	}

	ret = crypto_shash_setkey(chap->shash_tfm,
			transformed_key->key, transformed_key->len);
	if (ret) {
		dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
			 chap->qid, ret);
		goto out;
	}

	/* With DH, hash the augmented challenge instead of c2 directly */
	if (chap->dh_tfm) {
		challenge = kmalloc(chap->hash_len, GFP_KERNEL);
		if (!challenge) {
			ret = -ENOMEM;
			goto out;
		}
		ret = nvme_auth_augmented_challenge(chap->hash_id,
						    chap->sess_key,
						    chap->sess_key_len,
						    chap->c2, challenge,
						    chap->hash_len);
		if (ret)
			goto out;
	}
	dev_dbg(ctrl->device, "%s: qid %d ctrl response seq %u transaction %d\n",
		__func__, chap->qid, chap->s2, chap->transaction);
	dev_dbg(ctrl->device, "%s: qid %d challenge %*ph\n",
		__func__, chap->qid, (int)chap->hash_len, challenge);
	dev_dbg(ctrl->device, "%s: qid %d subsysnqn %s\n",
		__func__, chap->qid, ctrl->opts->subsysnqn);
	dev_dbg(ctrl->device, "%s: qid %d hostnqn %s\n",
		__func__, chap->qid, ctrl->opts->host->nqn);
	shash->tfm = chap->shash_tfm;
	ret = crypto_shash_init(shash);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, challenge, chap->hash_len);
	if (ret)
		goto out;
	put_unaligned_le32(chap->s2, buf);
	ret = crypto_shash_update(shash, buf, 4);
	if (ret)
		goto out;
	put_unaligned_le16(chap->transaction, buf);
	ret = crypto_shash_update(shash, buf, 2);
	if (ret)
		goto out;
	/* Single zero byte separator */
	memset(buf, 0, 4);
	ret = crypto_shash_update(shash, buf, 1);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, "Controller", 10);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
				  strlen(ctrl->opts->subsysnqn));
	if (ret)
		goto out;
	/* Another zero byte separator between the two NQNs */
	ret = crypto_shash_update(shash, buf, 1);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
				  strlen(ctrl->opts->host->nqn));
	if (ret)
		goto out;
	ret = crypto_shash_final(shash, chap->response);
out:
	/* Free the augmented challenge only if one was allocated */
	if (challenge != chap->c2)
		kfree(challenge);
	nvme_auth_free_key(transformed_key);
	return ret;
}
602 
/*
 * Run the DH key exchange: generate (or reuse) the host key pair and
 * derive the shared session key from the controller's public value.
 *
 * On error, chap->status is set to the failure reason to report and a
 * negative errno is returned.
 */
static int nvme_auth_dhchap_exponential(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	int ret;

	/* An existing host key pair can be reused for re-authentication */
	if (chap->host_key && chap->host_key_len) {
		dev_dbg(ctrl->device,
			"qid %d: reusing host key\n", chap->qid);
		goto gen_sesskey;
	}
	ret = nvme_auth_gen_privkey(chap->dh_tfm, chap->dhgroup_id);
	if (ret < 0) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return ret;
	}

	chap->host_key_len = crypto_kpp_maxsize(chap->dh_tfm);

	chap->host_key = kzalloc(chap->host_key_len, GFP_KERNEL);
	if (!chap->host_key) {
		chap->host_key_len = 0;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return -ENOMEM;
	}
	ret = nvme_auth_gen_pubkey(chap->dh_tfm,
				   chap->host_key, chap->host_key_len);
	if (ret) {
		dev_dbg(ctrl->device,
			"failed to generate public key, error %d\n", ret);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return ret;
	}

gen_sesskey:
	/* Session key has the same size as the host public key */
	chap->sess_key_len = chap->host_key_len;
	chap->sess_key = kmalloc(chap->sess_key_len, GFP_KERNEL);
	if (!chap->sess_key) {
		chap->sess_key_len = 0;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return -ENOMEM;
	}

	ret = nvme_auth_gen_shared_secret(chap->dh_tfm,
					  chap->ctrl_key, chap->ctrl_key_len,
					  chap->sess_key, chap->sess_key_len);
	if (ret) {
		dev_dbg(ctrl->device,
			"failed to generate shared secret, error %d\n", ret);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return ret;
	}
	dev_dbg(ctrl->device, "shared secret %*ph\n",
		(int)chap->sess_key_len, chap->sess_key);
	return 0;
}
658 
/*
 * Wipe all per-exchange state and key material from the queue context.
 * Key buffers are freed with kfree_sensitive() so the secrets are
 * zeroed before release.  The shash/kpp transforms are deliberately
 * kept so they can be reused on re-authentication.
 */
static void nvme_auth_reset_dhchap(struct nvme_dhchap_queue_context *chap)
{
	nvme_auth_free_key(chap->transformed_key);
	chap->transformed_key = NULL;
	kfree_sensitive(chap->host_key);
	chap->host_key = NULL;
	chap->host_key_len = 0;
	kfree_sensitive(chap->ctrl_key);
	chap->ctrl_key = NULL;
	chap->ctrl_key_len = 0;
	kfree_sensitive(chap->sess_key);
	chap->sess_key = NULL;
	chap->sess_key_len = 0;
	chap->status = 0;
	chap->error = 0;
	chap->s1 = 0;
	chap->s2 = 0;
	chap->bi_directional = false;
	chap->transaction = 0;
	memset(chap->c1, 0, sizeof(chap->c1));
	memset(chap->c2, 0, sizeof(chap->c2));
	mempool_free(chap->buf, nvme_chap_buf_pool);
	chap->buf = NULL;
}
683 
/*
 * Final teardown of a queue context: wipe per-exchange state and also
 * release the cached crypto transforms kept across re-authentications.
 */
static void nvme_auth_free_dhchap(struct nvme_dhchap_queue_context *chap)
{
	nvme_auth_reset_dhchap(chap);
	chap->authenticated = false;
	if (chap->shash_tfm)
		crypto_free_shash(chap->shash_tfm);
	if (chap->dh_tfm)
		crypto_free_kpp(chap->dh_tfm);
}
693 
694 void nvme_auth_revoke_tls_key(struct nvme_ctrl *ctrl)
695 {
696 	dev_dbg(ctrl->device, "Wipe generated TLS PSK %08x\n",
697 		key_serial(ctrl->opts->tls_key));
698 	key_revoke(ctrl->opts->tls_key);
699 	key_put(ctrl->opts->tls_key);
700 	ctrl->opts->tls_key = NULL;
701 }
702 EXPORT_SYMBOL_GPL(nvme_auth_revoke_tls_key);
703 
/*
 * Derive a TLS PSK from the DH session key after a successful secure
 * concatenation handshake and install it in the keyring, replacing any
 * previously generated key.  Admin queue (qid 0) only.
 *
 * Derivation chain: session key -> generated PSK -> identity digest ->
 * TLS PSK -> keyring entry.  All intermediate secrets are freed with
 * kfree_sensitive().
 *
 * Returns 0 on success or a negative errno.
 */
static int nvme_auth_secure_concat(struct nvme_ctrl *ctrl,
				   struct nvme_dhchap_queue_context *chap)
{
	u8 *psk, *digest, *tls_psk;
	struct key *tls_key;
	size_t psk_len;
	int ret = 0;

	/* Secure concatenation requires a DH exchange to have happened */
	if (!chap->sess_key) {
		dev_warn(ctrl->device,
			 "%s: qid %d no session key negotiated\n",
			 __func__, chap->qid);
		return -ENOKEY;
	}

	if (chap->qid) {
		dev_warn(ctrl->device,
			 "qid %d: secure concatenation not supported on I/O queues\n",
			 chap->qid);
		return -EINVAL;
	}
	ret = nvme_auth_generate_psk(chap->hash_id, chap->sess_key,
				     chap->sess_key_len,
				     chap->c1, chap->c2,
				     chap->hash_len, &psk, &psk_len);
	if (ret) {
		dev_warn(ctrl->device,
			 "%s: qid %d failed to generate PSK, error %d\n",
			 __func__, chap->qid, ret);
		return ret;
	}
	dev_dbg(ctrl->device,
		  "%s: generated psk %*ph\n", __func__, (int)psk_len, psk);

	ret = nvme_auth_generate_digest(chap->hash_id, psk, psk_len,
					ctrl->opts->subsysnqn,
					ctrl->opts->host->nqn, &digest);
	if (ret) {
		dev_warn(ctrl->device,
			 "%s: qid %d failed to generate digest, error %d\n",
			 __func__, chap->qid, ret);
		goto out_free_psk;
	}
	dev_dbg(ctrl->device, "%s: generated digest %s\n",
		 __func__, digest);
	ret = nvme_auth_derive_tls_psk(chap->hash_id, psk, psk_len,
				       digest, &tls_psk);
	if (ret) {
		dev_warn(ctrl->device,
			 "%s: qid %d failed to derive TLS psk, error %d\n",
			 __func__, chap->qid, ret);
		goto out_free_digest;
	}

	tls_key = nvme_tls_psk_refresh(ctrl->opts->keyring,
				       ctrl->opts->host->nqn,
				       ctrl->opts->subsysnqn, chap->hash_id,
				       tls_psk, psk_len, digest);
	if (IS_ERR(tls_key)) {
		ret = PTR_ERR(tls_key);
		dev_warn(ctrl->device,
			 "%s: qid %d failed to insert generated key, error %d\n",
			 __func__, chap->qid, ret);
		/* Fall through: old key is still revoked, tls_key set NULL */
		tls_key = NULL;
	}
	kfree_sensitive(tls_psk);
	/* Replace a previously generated PSK with the fresh one */
	if (ctrl->opts->tls_key)
		nvme_auth_revoke_tls_key(ctrl);
	ctrl->opts->tls_key = tls_key;
out_free_digest:
	kfree_sensitive(digest);
out_free_psk:
	kfree_sensitive(psk);
	return ret;
}
779 
/*
 * Per-queue authentication state machine, run from nvme_auth_wq.
 *
 * Drives the full DH-HMAC-CHAP exchange for one queue:
 *   1. send AUTH_Negotiate
 *   2. receive AUTH_Challenge and process it
 *   3. send AUTH_Reply (after optional DH exchange and host response)
 *   4. receive AUTH_Success1 and verify the controller response
 *   5. send AUTH_Success2 if bidirectional
 * On any protocol-level failure, falls through to fail2 and sends
 * AUTH_Failure2 with the accumulated chap->status.
 *
 * The outcome is stored in chap->error for nvme_auth_wait() to pick up;
 * transport failures return early without sending failure2.
 */
static void nvme_queue_auth_work(struct work_struct *work)
{
	struct nvme_dhchap_queue_context *chap =
		container_of(work, struct nvme_dhchap_queue_context, auth_work);
	struct nvme_ctrl *ctrl = chap->ctrl;
	size_t tl;
	int ret = 0;

	/*
	 * Allocate a large enough buffer for the entire negotiation:
	 * 4k is enough to ffdhe8192.
	 */
	chap->buf = mempool_alloc(nvme_chap_buf_pool, GFP_KERNEL);
	if (!chap->buf) {
		chap->error = -ENOMEM;
		return;
	}

	chap->transaction = ctrl->transaction++;

	/* DH-HMAC-CHAP Step 1: send negotiate */
	dev_dbg(ctrl->device, "%s: qid %d send negotiate\n",
		__func__, chap->qid);
	ret = nvme_auth_set_dhchap_negotiate_data(ctrl, chap);
	if (ret < 0) {
		chap->error = ret;
		return;
	}
	tl = ret;
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
	if (ret) {
		chap->error = ret;
		return;
	}

	/* DH-HMAC-CHAP Step 2: receive challenge */
	dev_dbg(ctrl->device, "%s: qid %d receive challenge\n",
		__func__, chap->qid);

	memset(chap->buf, 0, CHAP_BUF_SIZE);
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, CHAP_BUF_SIZE,
			       false);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid %d failed to receive challenge, %s %d\n",
			 chap->qid, ret < 0 ? "error" : "nvme status", ret);
		chap->error = ret;
		return;
	}
	ret = nvme_auth_receive_validate(ctrl, chap->qid, chap->buf, chap->transaction,
					 NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE);
	if (ret) {
		chap->status = ret;
		chap->error = -EKEYREJECTED;
		return;
	}

	ret = nvme_auth_process_dhchap_challenge(ctrl, chap);
	if (ret) {
		/* Invalid challenge parameters */
		chap->error = ret;
		goto fail2;
	}

	/* A controller DH value implies a DH group was selected */
	if (chap->ctrl_key_len) {
		dev_dbg(ctrl->device,
			"%s: qid %d DH exponential\n",
			__func__, chap->qid);
		ret = nvme_auth_dhchap_exponential(ctrl, chap);
		if (ret) {
			chap->error = ret;
			goto fail2;
		}
	}

	dev_dbg(ctrl->device, "%s: qid %d host response\n",
		__func__, chap->qid);
	/* Serialize against concurrent key updates on the controller */
	mutex_lock(&ctrl->dhchap_auth_mutex);
	ret = nvme_auth_dhchap_setup_host_response(ctrl, chap);
	mutex_unlock(&ctrl->dhchap_auth_mutex);
	if (ret) {
		chap->error = ret;
		goto fail2;
	}

	/* DH-HMAC-CHAP Step 3: send reply */
	dev_dbg(ctrl->device, "%s: qid %d send reply\n",
		__func__, chap->qid);
	ret = nvme_auth_set_dhchap_reply_data(ctrl, chap);
	if (ret < 0) {
		chap->error = ret;
		goto fail2;
	}

	tl = ret;
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
	if (ret) {
		chap->error = ret;
		goto fail2;
	}

	/* DH-HMAC-CHAP Step 4: receive success1 */
	dev_dbg(ctrl->device, "%s: qid %d receive success1\n",
		__func__, chap->qid);

	memset(chap->buf, 0, CHAP_BUF_SIZE);
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, CHAP_BUF_SIZE,
			       false);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid %d failed to receive success1, %s %d\n",
			 chap->qid, ret < 0 ? "error" : "nvme status", ret);
		chap->error = ret;
		return;
	}
	ret = nvme_auth_receive_validate(ctrl, chap->qid,
					 chap->buf, chap->transaction,
					 NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1);
	if (ret) {
		chap->status = ret;
		chap->error = -EKEYREJECTED;
		return;
	}

	/* Compute the expected controller response before validating it */
	mutex_lock(&ctrl->dhchap_auth_mutex);
	if (ctrl->ctrl_key) {
		dev_dbg(ctrl->device,
			"%s: qid %d controller response\n",
			__func__, chap->qid);
		ret = nvme_auth_dhchap_setup_ctrl_response(ctrl, chap);
		if (ret) {
			mutex_unlock(&ctrl->dhchap_auth_mutex);
			chap->error = ret;
			goto fail2;
		}
	}
	mutex_unlock(&ctrl->dhchap_auth_mutex);

	ret = nvme_auth_process_dhchap_success1(ctrl, chap);
	if (ret) {
		/* Controller authentication failed */
		chap->error = -EKEYREJECTED;
		goto fail2;
	}

	if (chap->bi_directional) {
		/* DH-HMAC-CHAP Step 5: send success2 */
		dev_dbg(ctrl->device, "%s: qid %d send success2\n",
			__func__, chap->qid);
		tl = nvme_auth_set_dhchap_success2_data(ctrl, chap);
		ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
		if (ret)
			chap->error = ret;
	}
	if (!ret) {
		chap->error = 0;
		chap->authenticated = true;
		/* For secure concatenation, derive and install the TLS PSK */
		if (ctrl->opts->concat &&
		    (ret = nvme_auth_secure_concat(ctrl, chap))) {
			dev_warn(ctrl->device,
				 "%s: qid %d failed to enable secure concatenation\n",
				 __func__, chap->qid);
			chap->error = ret;
			chap->authenticated = false;
		}
		return;
	}

fail2:
	if (chap->status == 0)
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
	dev_dbg(ctrl->device, "%s: qid %d send failure2, status %x\n",
		__func__, chap->qid, chap->status);
	tl = nvme_auth_set_dhchap_failure2_data(ctrl, chap);
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
	/*
	 * only update error if send failure2 failed and no other
	 * error had been set during authentication.
	 */
	if (ret && !chap->error)
		chap->error = ret;
}
962 
963 int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
964 {
965 	struct nvme_dhchap_queue_context *chap;
966 
967 	if (!ctrl->host_key) {
968 		dev_warn(ctrl->device, "qid %d: no key\n", qid);
969 		return -ENOKEY;
970 	}
971 
972 	if (ctrl->opts->dhchap_ctrl_secret && !ctrl->ctrl_key) {
973 		dev_warn(ctrl->device, "qid %d: invalid ctrl key\n", qid);
974 		return -ENOKEY;
975 	}
976 
977 	chap = &ctrl->dhchap_ctxs[qid];
978 	cancel_work_sync(&chap->auth_work);
979 	queue_work(nvme_auth_wq, &chap->auth_work);
980 	return 0;
981 }
982 EXPORT_SYMBOL_GPL(nvme_auth_negotiate);
983 
984 int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
985 {
986 	struct nvme_dhchap_queue_context *chap;
987 	int ret;
988 
989 	chap = &ctrl->dhchap_ctxs[qid];
990 	flush_work(&chap->auth_work);
991 	ret = chap->error;
992 	/* clear sensitive info */
993 	nvme_auth_reset_dhchap(chap);
994 	return ret;
995 }
996 EXPORT_SYMBOL_GPL(nvme_auth_wait);
997 
/*
 * Controller-level re-authentication work: authenticate the admin queue
 * first, then re-authenticate all I/O queues that had been
 * authenticated before.  I/O queue work items are queued in one pass
 * and flushed in a second so they run concurrently.
 */
static void nvme_ctrl_auth_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, dhchap_auth_work);
	int ret, q;

	/*
	 * If the ctrl is not connected, bail as reconnect will handle
	 * authentication.
	 */
	if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
		return;

	/* Authenticate admin queue first */
	ret = nvme_auth_negotiate(ctrl, 0);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid 0: error %d setting up authentication\n", ret);
		return;
	}
	ret = nvme_auth_wait(ctrl, 0);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid 0: authentication failed\n");
		return;
	}
	/*
	 * Only run authentication on the admin queue for secure concatenation.
	 */
	if (ctrl->opts->concat)
		return;

	/* First pass: queue auth work for all previously authenticated queues */
	for (q = 1; q < ctrl->queue_count; q++) {
		struct nvme_dhchap_queue_context *chap =
			&ctrl->dhchap_ctxs[q];
		/*
		 * Skip re-authentication if the queue had
		 * not been authenticated initially.
		 */
		if (!chap->authenticated)
			continue;
		cancel_work_sync(&chap->auth_work);
		queue_work(nvme_auth_wq, &chap->auth_work);
	}

	/*
	 * Failure is a soft-state; credentials remain valid until
	 * the controller terminates the connection.
	 */
	/* Second pass: wait for each queued work item and collect results */
	for (q = 1; q < ctrl->queue_count; q++) {
		struct nvme_dhchap_queue_context *chap =
			&ctrl->dhchap_ctxs[q];
		if (!chap->authenticated)
			continue;
		flush_work(&chap->auth_work);
		ret = chap->error;
		nvme_auth_reset_dhchap(chap);
		if (ret)
			dev_warn(ctrl->device,
				 "qid %d: authentication failed\n", q);
	}
}
1060 
1061 int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
1062 {
1063 	struct nvme_dhchap_queue_context *chap;
1064 	int i, ret;
1065 
1066 	mutex_init(&ctrl->dhchap_auth_mutex);
1067 	INIT_WORK(&ctrl->dhchap_auth_work, nvme_ctrl_auth_work);
1068 	if (!ctrl->opts)
1069 		return 0;
1070 	ret = nvme_auth_generate_key(ctrl->opts->dhchap_secret,
1071 			&ctrl->host_key);
1072 	if (ret)
1073 		return ret;
1074 	ret = nvme_auth_generate_key(ctrl->opts->dhchap_ctrl_secret,
1075 			&ctrl->ctrl_key);
1076 	if (ret)
1077 		goto err_free_dhchap_secret;
1078 
1079 	if (!ctrl->opts->dhchap_secret && !ctrl->opts->dhchap_ctrl_secret)
1080 		return 0;
1081 
1082 	ctrl->dhchap_ctxs = kvcalloc(ctrl_max_dhchaps(ctrl),
1083 				sizeof(*chap), GFP_KERNEL);
1084 	if (!ctrl->dhchap_ctxs) {
1085 		ret = -ENOMEM;
1086 		goto err_free_dhchap_ctrl_secret;
1087 	}
1088 
1089 	for (i = 0; i < ctrl_max_dhchaps(ctrl); i++) {
1090 		chap = &ctrl->dhchap_ctxs[i];
1091 		chap->qid = i;
1092 		chap->ctrl = ctrl;
1093 		chap->authenticated = false;
1094 		INIT_WORK(&chap->auth_work, nvme_queue_auth_work);
1095 	}
1096 
1097 	return 0;
1098 err_free_dhchap_ctrl_secret:
1099 	nvme_auth_free_key(ctrl->ctrl_key);
1100 	ctrl->ctrl_key = NULL;
1101 err_free_dhchap_secret:
1102 	nvme_auth_free_key(ctrl->host_key);
1103 	ctrl->host_key = NULL;
1104 	return ret;
1105 }
1106 EXPORT_SYMBOL_GPL(nvme_auth_init_ctrl);
1107 
/*
 * Cancel any pending controller-wide re-authentication and wait for an
 * in-flight nvme_ctrl_auth_work() to finish before returning.
 */
void nvme_auth_stop(struct nvme_ctrl *ctrl)
{
	cancel_work_sync(&ctrl->dhchap_auth_work);
}
EXPORT_SYMBOL_GPL(nvme_auth_stop);
1113 
1114 void nvme_auth_free(struct nvme_ctrl *ctrl)
1115 {
1116 	int i;
1117 
1118 	if (ctrl->dhchap_ctxs) {
1119 		for (i = 0; i < ctrl_max_dhchaps(ctrl); i++)
1120 			nvme_auth_free_dhchap(&ctrl->dhchap_ctxs[i]);
1121 		kfree(ctrl->dhchap_ctxs);
1122 	}
1123 	if (ctrl->host_key) {
1124 		nvme_auth_free_key(ctrl->host_key);
1125 		ctrl->host_key = NULL;
1126 	}
1127 	if (ctrl->ctrl_key) {
1128 		nvme_auth_free_key(ctrl->ctrl_key);
1129 		ctrl->ctrl_key = NULL;
1130 	}
1131 }
1132 EXPORT_SYMBOL_GPL(nvme_auth_free);
1133 
1134 int __init nvme_init_auth(void)
1135 {
1136 	nvme_auth_wq = alloc_workqueue("nvme-auth-wq",
1137 			       WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
1138 	if (!nvme_auth_wq)
1139 		return -ENOMEM;
1140 
1141 	nvme_chap_buf_cache = kmem_cache_create("nvme-chap-buf-cache",
1142 				CHAP_BUF_SIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
1143 	if (!nvme_chap_buf_cache)
1144 		goto err_destroy_workqueue;
1145 
1146 	nvme_chap_buf_pool = mempool_create(16, mempool_alloc_slab,
1147 			mempool_free_slab, nvme_chap_buf_cache);
1148 	if (!nvme_chap_buf_pool)
1149 		goto err_destroy_chap_buf_cache;
1150 
1151 	return 0;
1152 err_destroy_chap_buf_cache:
1153 	kmem_cache_destroy(nvme_chap_buf_cache);
1154 err_destroy_workqueue:
1155 	destroy_workqueue(nvme_auth_wq);
1156 	return -ENOMEM;
1157 }
1158 
/*
 * Module-exit teardown, mirroring nvme_init_auth() in reverse order:
 * the mempool must go before the backing slab cache it draws from.
 */
void __exit nvme_exit_auth(void)
{
	mempool_destroy(nvme_chap_buf_pool);
	kmem_cache_destroy(nvme_chap_buf_cache);
	destroy_workqueue(nvme_auth_wq);
}
1165