xref: /linux/drivers/nvme/host/auth.c (revision 844d950bb2cb1fc5b8973369de59cbfb7eecd94d)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2020 Hannes Reinecke, SUSE Linux
4  */
5 
6 #include <linux/crc32.h>
7 #include <linux/base64.h>
8 #include <linux/prandom.h>
9 #include <linux/unaligned.h>
10 #include <crypto/dh.h>
11 #include "nvme.h"
12 #include "fabrics.h"
13 #include <linux/nvme-auth.h>
14 #include <linux/nvme-keyring.h>
15 
/*
 * All DH-HMAC-CHAP payloads are exchanged through fixed-size 4k buffers,
 * large enough for the largest negotiated DH group (ffdhe8192).
 */
#define CHAP_BUF_SIZE 4096
static struct kmem_cache *nvme_chap_buf_cache;
/* mempool on top of the slab cache so authentication can make forward progress */
static mempool_t *nvme_chap_buf_pool;
19 
/*
 * Per-queue DH-HMAC-CHAP negotiation state.  One context is allocated
 * for every possible queue (see ctrl_max_dhchaps()); auth_work drives
 * the protocol exchange for its queue.
 */
struct nvme_dhchap_queue_context {
	struct list_head entry;
	struct work_struct auth_work;	/* runs nvme_queue_auth_work() */
	struct nvme_ctrl *ctrl;
	struct crypto_kpp *dh_tfm;	/* DH transform; NULL for the NULL dhgroup */
	struct nvme_dhchap_key *transformed_key; /* cached transformed host key */
	void *buf;	/* CHAP_BUF_SIZE scratch buffer from nvme_chap_buf_pool */
	int qid;
	int error;	/* final outcome, collected by nvme_auth_wait() */
	u32 s1;		/* sequence number from the controller's challenge */
	u32 s2;		/* sequence number sent in the host's reply */
	bool bi_directional;	/* controller authentication requested */
	bool authenticated;
	u16 transaction;
	u8 status;	/* NVME_AUTH_DHCHAP_FAILURE_* reason to report back */
	u8 dhgroup_id;
	u8 hash_id;
	u8 sc_c;	/* secure channel concatenation code from negotiate */
	size_t hash_len;
	u8 c1[NVME_AUTH_MAX_DIGEST_SIZE];	/* controller challenge */
	u8 c2[NVME_AUTH_MAX_DIGEST_SIZE];	/* host challenge */
	u8 response[NVME_AUTH_MAX_DIGEST_SIZE];
	u8 *ctrl_key;	/* controller's DH public value */
	u8 *host_key;	/* host's DH public value */
	u8 *sess_key;	/* derived DH shared secret */
	int ctrl_key_len;
	int host_key_len;
	int sess_key_len;
};
49 
50 static struct workqueue_struct *nvme_auth_wq;
51 
52 static inline int ctrl_max_dhchaps(struct nvme_ctrl *ctrl)
53 {
54 	return ctrl->opts->nr_io_queues + ctrl->opts->nr_write_queues +
55 			ctrl->opts->nr_poll_queues + 1;
56 }
57 
58 static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
59 			    void *data, size_t data_len, bool auth_send)
60 {
61 	struct nvme_command cmd = {};
62 	nvme_submit_flags_t flags = NVME_SUBMIT_RETRY;
63 	struct request_queue *q = ctrl->fabrics_q;
64 	int ret;
65 
66 	if (qid != 0) {
67 		flags |= NVME_SUBMIT_NOWAIT | NVME_SUBMIT_RESERVED;
68 		q = ctrl->connect_q;
69 	}
70 
71 	cmd.auth_common.opcode = nvme_fabrics_command;
72 	cmd.auth_common.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER;
73 	cmd.auth_common.spsp0 = 0x01;
74 	cmd.auth_common.spsp1 = 0x01;
75 	if (auth_send) {
76 		cmd.auth_send.fctype = nvme_fabrics_type_auth_send;
77 		cmd.auth_send.tl = cpu_to_le32(data_len);
78 	} else {
79 		cmd.auth_receive.fctype = nvme_fabrics_type_auth_receive;
80 		cmd.auth_receive.al = cpu_to_le32(data_len);
81 	}
82 
83 	ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, data_len,
84 				     qid == 0 ? NVME_QID_ANY : qid, flags);
85 	if (ret > 0)
86 		dev_warn(ctrl->device,
87 			"qid %d auth_send failed with status %d\n", qid, ret);
88 	else if (ret < 0)
89 		dev_err(ctrl->device,
90 			"qid %d auth_send failed with error %d\n", qid, ret);
91 	return ret;
92 }
93 
94 static int nvme_auth_receive_validate(struct nvme_ctrl *ctrl, int qid,
95 		struct nvmf_auth_dhchap_failure_data *data,
96 		u16 transaction, u8 expected_msg)
97 {
98 	dev_dbg(ctrl->device, "%s: qid %d auth_type %d auth_id %x\n",
99 		__func__, qid, data->auth_type, data->auth_id);
100 
101 	if (data->auth_type == NVME_AUTH_COMMON_MESSAGES &&
102 	    data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
103 		return data->rescode_exp;
104 	}
105 	if (data->auth_type != NVME_AUTH_DHCHAP_MESSAGES ||
106 	    data->auth_id != expected_msg) {
107 		dev_warn(ctrl->device,
108 			 "qid %d invalid message %02x/%02x\n",
109 			 qid, data->auth_type, data->auth_id);
110 		return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
111 	}
112 	if (le16_to_cpu(data->t_id) != transaction) {
113 		dev_warn(ctrl->device,
114 			 "qid %d invalid transaction ID %d\n",
115 			 qid, le16_to_cpu(data->t_id));
116 		return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
117 	}
118 	return 0;
119 }
120 
/*
 * Build the AUTH_Negotiate payload announcing the hash and DH group
 * identifiers supported by the host.
 *
 * Returns the payload size on success or a negative errno (with
 * chap->status set) on failure.
 */
static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_negotiate_data *data = chap->buf;
	size_t size = sizeof(*data) + sizeof(union nvmf_auth_protocol);

	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}
	memset((u8 *)chap->buf, 0, size);
	data->auth_type = NVME_AUTH_COMMON_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
	data->t_id = cpu_to_le16(chap->transaction);
	/*
	 * Secure channel concatenation is only negotiated on the admin
	 * queue; request a new or replacement TLS PSK as appropriate.
	 */
	if (ctrl->opts->concat && chap->qid == 0) {
		if (ctrl->opts->tls_key)
			data->sc_c = NVME_AUTH_SECP_REPLACETLSPSK;
		else
			data->sc_c = NVME_AUTH_SECP_NEWTLSPSK;
	} else
		data->sc_c = NVME_AUTH_SECP_NOSC;
	data->napd = 1;
	data->auth_protocol[0].dhchap.authid = NVME_AUTH_DHCHAP_AUTH_ID;
	data->auth_protocol[0].dhchap.halen = 3;
	data->auth_protocol[0].dhchap.dhlen = 6;
	/* hash identifiers occupy the start of idlist ... */
	data->auth_protocol[0].dhchap.idlist[0] = NVME_AUTH_HASH_SHA256;
	data->auth_protocol[0].dhchap.idlist[1] = NVME_AUTH_HASH_SHA384;
	data->auth_protocol[0].dhchap.idlist[2] = NVME_AUTH_HASH_SHA512;
	/* ... while DH group identifiers start at offset 30 */
	data->auth_protocol[0].dhchap.idlist[30] = NVME_AUTH_DHGROUP_NULL;
	data->auth_protocol[0].dhchap.idlist[31] = NVME_AUTH_DHGROUP_2048;
	data->auth_protocol[0].dhchap.idlist[32] = NVME_AUTH_DHGROUP_3072;
	data->auth_protocol[0].dhchap.idlist[33] = NVME_AUTH_DHGROUP_4096;
	data->auth_protocol[0].dhchap.idlist[34] = NVME_AUTH_DHGROUP_6144;
	data->auth_protocol[0].dhchap.idlist[35] = NVME_AUTH_DHGROUP_8192;

	/* remember sc_c; it is folded into the host response hash later */
	chap->sc_c = data->sc_c;

	return size;
}
160 
/*
 * Parse the AUTH_Challenge payload from the controller: select (or
 * reuse) the HMAC hash and DH group, record the controller's challenge
 * value and sequence number, and stash the controller's DH public
 * value for the later shared-secret derivation.
 *
 * Returns 0 on success or a negative errno, with chap->status set to
 * the failure reason that will be reported back to the controller.
 */
static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_challenge_data *data = chap->buf;
	u16 dhvlen = le16_to_cpu(data->dhvlen);
	size_t size = sizeof(*data) + data->hl + dhvlen;
	const char *gid_name = nvme_auth_dhgroup_name(data->dhgid);
	const char *hmac_name, *kpp_name;

	/* The full payload must fit into the 4k negotiation buffer */
	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}

	hmac_name = nvme_auth_hmac_name(data->hashid);
	if (!hmac_name) {
		dev_warn(ctrl->device,
			 "qid %d: invalid HASH ID %d\n",
			 chap->qid, data->hashid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		return -EPROTO;
	}

	/* Same hash as the previous negotiation? Keep using it. */
	if (chap->hash_id == data->hashid && chap->hash_len == data->hl) {
		dev_dbg(ctrl->device,
			"qid %d: reuse existing hash %s\n",
			chap->qid, hmac_name);
		goto select_kpp;
	}

	/* Advertised digest length must match the selected hash */
	if (nvme_auth_hmac_hash_len(data->hashid) != data->hl) {
		dev_warn(ctrl->device,
			 "qid %d: invalid hash length %d\n",
			 chap->qid, data->hl);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		return -EPROTO;
	}

	chap->hash_id = data->hashid;
	chap->hash_len = data->hl;
	dev_dbg(ctrl->device, "qid %d: selected hash %s\n",
		chap->qid, hmac_name);

select_kpp:
	kpp_name = nvme_auth_dhgroup_kpp(data->dhgid);
	if (!kpp_name) {
		dev_warn(ctrl->device,
			 "qid %d: invalid DH group id %d\n",
			 chap->qid, data->dhgid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
		/* Leave previous dh_tfm intact */
		return -EPROTO;
	}

	/* Same DH group with a usable transform? Reuse it. */
	if (chap->dhgroup_id == data->dhgid &&
	    (data->dhgid == NVME_AUTH_DHGROUP_NULL || chap->dh_tfm)) {
		dev_dbg(ctrl->device,
			"qid %d: reuse existing DH group %s\n",
			chap->qid, gid_name);
		goto skip_kpp;
	}

	/* Reset dh_tfm if it can't be reused */
	if (chap->dh_tfm) {
		crypto_free_kpp(chap->dh_tfm);
		chap->dh_tfm = NULL;
	}

	if (data->dhgid != NVME_AUTH_DHGROUP_NULL) {
		/* A real DH group requires a controller public value */
		if (dhvlen == 0) {
			dev_warn(ctrl->device,
				 "qid %d: empty DH value\n",
				 chap->qid);
			chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
			return -EPROTO;
		}

		chap->dh_tfm = crypto_alloc_kpp(kpp_name, 0, 0);
		if (IS_ERR(chap->dh_tfm)) {
			int ret = PTR_ERR(chap->dh_tfm);

			dev_warn(ctrl->device,
				 "qid %d: error %d initializing DH group %s\n",
				 chap->qid, ret, gid_name);
			chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
			chap->dh_tfm = NULL;
			return ret;
		}
		dev_dbg(ctrl->device, "qid %d: selected DH group %s\n",
			chap->qid, gid_name);
	} else if (dhvlen != 0) {
		/* Conversely, the NULL group must not carry a DH value */
		dev_warn(ctrl->device,
			 "qid %d: invalid DH value for NULL DH\n",
			 chap->qid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EPROTO;
	}
	chap->dhgroup_id = data->dhgid;

skip_kpp:
	chap->s1 = le32_to_cpu(data->seqnum);
	/* Save the controller challenge for the response calculation */
	memcpy(chap->c1, data->cval, chap->hash_len);
	if (dhvlen) {
		/* Controller public value follows the challenge in cval */
		chap->ctrl_key = kmalloc(dhvlen, GFP_KERNEL);
		if (!chap->ctrl_key) {
			chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
			return -ENOMEM;
		}
		chap->ctrl_key_len = dhvlen;
		memcpy(chap->ctrl_key, data->cval + chap->hash_len,
		       dhvlen);
		dev_dbg(ctrl->device, "ctrl public key %*ph\n",
			 (int)chap->ctrl_key_len, chap->ctrl_key);
	}

	return 0;
}
278 
/*
 * Build the AUTH_Reply payload: the host's response, an optional host
 * challenge (c2) when controller authentication or secure concatenation
 * is requested, and the host's DH public value.
 *
 * Returns the payload size on success or a negative errno (with
 * chap->status set) on failure.
 */
static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_reply_data *data = chap->buf;
	size_t size = sizeof(*data);

	/* rval holds the response plus the (optional) host challenge */
	size += 2 * chap->hash_len;

	if (chap->host_key_len)
		size += chap->host_key_len;

	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}

	memset(chap->buf, 0, size);
	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
	data->t_id = cpu_to_le16(chap->transaction);
	data->hl = chap->hash_len;
	data->dhvlen = cpu_to_le16(chap->host_key_len);
	memcpy(data->rval, chap->response, chap->hash_len);
	if (ctrl->ctrl_key)
		chap->bi_directional = true;
	if (ctrl->ctrl_key || ctrl->opts->concat) {
		/* Challenge the controller to authenticate itself, too */
		get_random_bytes(chap->c2, chap->hash_len);
		data->cvalid = 1;
		memcpy(data->rval + chap->hash_len, chap->c2,
		       chap->hash_len);
		dev_dbg(ctrl->device, "%s: qid %d ctrl challenge %*ph\n",
			__func__, chap->qid, (int)chap->hash_len, chap->c2);
	} else {
		memset(chap->c2, 0, chap->hash_len);
	}
	if (ctrl->opts->concat) {
		/* Secure concatenation uses a zero sequence number */
		chap->s2 = 0;
		chap->bi_directional = false;
	} else
		chap->s2 = nvme_auth_get_seqnum();
	data->seqnum = cpu_to_le32(chap->s2);
	if (chap->host_key_len) {
		dev_dbg(ctrl->device, "%s: qid %d host public key %*ph\n",
			__func__, chap->qid,
			chap->host_key_len, chap->host_key);
		memcpy(data->rval + 2 * chap->hash_len, chap->host_key,
		       chap->host_key_len);
	}

	return size;
}
330 
/*
 * Process the AUTH_Success1 payload.  If the controller included a
 * response value (rvalid set), verify it against the expected response
 * computed by nvme_auth_dhchap_setup_ctrl_response().
 *
 * Returns 0 on success, a negative errno on malformed input, or
 * -ECONNREFUSED when controller authentication fails.
 */
static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_success1_data *data = chap->buf;
	size_t size = sizeof(*data) + chap->hash_len;

	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}

	if (data->hl != chap->hash_len) {
		dev_warn(ctrl->device,
			 "qid %d: invalid hash length %u\n",
			 chap->qid, data->hl);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		return -EPROTO;
	}

	/* Just print out information for the admin queue */
	if (chap->qid == 0)
		dev_info(ctrl->device,
			 "qid 0: authenticated with hash %s dhgroup %s\n",
			 nvme_auth_hmac_name(chap->hash_id),
			 nvme_auth_dhgroup_name(chap->dhgroup_id));

	/* No controller response included: unidirectional auth is done */
	if (!data->rvalid)
		return 0;

	/* Validate controller response */
	if (memcmp(chap->response, data->rval, data->hl)) {
		dev_dbg(ctrl->device, "%s: qid %d ctrl response %*ph\n",
			__func__, chap->qid, (int)chap->hash_len, data->rval);
		dev_dbg(ctrl->device, "%s: qid %d host response %*ph\n",
			__func__, chap->qid, (int)chap->hash_len,
			chap->response);
		dev_warn(ctrl->device,
			 "qid %d: controller authentication failed\n",
			 chap->qid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return -ECONNREFUSED;
	}

	/* Just print out information for the admin queue */
	if (chap->qid == 0)
		dev_info(ctrl->device,
			 "qid 0: controller authenticated\n");
	return 0;
}
380 
381 static int nvme_auth_set_dhchap_success2_data(struct nvme_ctrl *ctrl,
382 		struct nvme_dhchap_queue_context *chap)
383 {
384 	struct nvmf_auth_dhchap_success2_data *data = chap->buf;
385 	size_t size = sizeof(*data);
386 
387 	memset(chap->buf, 0, size);
388 	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
389 	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
390 	data->t_id = cpu_to_le16(chap->transaction);
391 
392 	return size;
393 }
394 
395 static int nvme_auth_set_dhchap_failure2_data(struct nvme_ctrl *ctrl,
396 		struct nvme_dhchap_queue_context *chap)
397 {
398 	struct nvmf_auth_dhchap_failure_data *data = chap->buf;
399 	size_t size = sizeof(*data);
400 
401 	memset(chap->buf, 0, size);
402 	data->auth_type = NVME_AUTH_COMMON_MESSAGES;
403 	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
404 	data->t_id = cpu_to_le16(chap->transaction);
405 	data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
406 	data->rescode_exp = chap->status;
407 
408 	return size;
409 }
410 
/*
 * Compute the host's DH-HMAC-CHAP response into chap->response:
 * HMAC keyed with the transformed host key, over the (possibly
 * augmented) challenge, s1, the transaction id, sc_c, the "HostHost"
 * tag, the host NQN, a NUL separator and the subsystem NQN.
 */
static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvme_auth_hmac_ctx hmac;
	u8 buf[4], *challenge = chap->c1;
	int ret;

	dev_dbg(ctrl->device, "%s: qid %d host response seq %u transaction %d\n",
		__func__, chap->qid, chap->s1, chap->transaction);

	/* The transformed key is cached across re-authentication runs */
	if (!chap->transformed_key) {
		chap->transformed_key = nvme_auth_transform_key(ctrl->host_key,
						ctrl->opts->host->nqn);
		if (IS_ERR(chap->transformed_key)) {
			ret = PTR_ERR(chap->transformed_key);
			chap->transformed_key = NULL;
			return ret;
		}
	} else {
		dev_dbg(ctrl->device, "%s: qid %d re-using host response\n",
			__func__, chap->qid);
	}

	ret = nvme_auth_hmac_init(&hmac, chap->hash_id,
				  chap->transformed_key->key,
				  chap->transformed_key->len);
	if (ret)
		goto out;

	/* With a DH group the challenge is augmented by the session key */
	if (chap->dh_tfm) {
		challenge = kmalloc(chap->hash_len, GFP_KERNEL);
		if (!challenge) {
			ret = -ENOMEM;
			goto out;
		}
		ret = nvme_auth_augmented_challenge(chap->hash_id,
						    chap->sess_key,
						    chap->sess_key_len,
						    chap->c1, challenge,
						    chap->hash_len);
		if (ret)
			goto out;
	}

	nvme_auth_hmac_update(&hmac, challenge, chap->hash_len);

	put_unaligned_le32(chap->s1, buf);
	nvme_auth_hmac_update(&hmac, buf, 4);

	put_unaligned_le16(chap->transaction, buf);
	nvme_auth_hmac_update(&hmac, buf, 2);

	*buf = chap->sc_c;
	nvme_auth_hmac_update(&hmac, buf, 1);
	nvme_auth_hmac_update(&hmac, "HostHost", 8);
	nvme_auth_hmac_update(&hmac, ctrl->opts->host->nqn,
			      strlen(ctrl->opts->host->nqn));
	/* A single NUL byte separates the two NQNs */
	memset(buf, 0, sizeof(buf));
	nvme_auth_hmac_update(&hmac, buf, 1);
	nvme_auth_hmac_update(&hmac, ctrl->opts->subsysnqn,
			      strlen(ctrl->opts->subsysnqn));
	nvme_auth_hmac_final(&hmac, chap->response);
	ret = 0;
out:
	/* Free the augmented challenge, if one was allocated */
	if (challenge != chap->c1)
		kfree(challenge);
	memzero_explicit(&hmac, sizeof(hmac));
	return ret;
}
480 
/*
 * Compute the response the controller is expected to return for
 * bidirectional authentication: HMAC keyed with the transformed
 * controller key, over the (possibly augmented) c2 challenge, s2,
 * the transaction id, a NUL byte, the "Controller" tag, the subsystem
 * NQN, a NUL separator and the host NQN.  Unlike the host key, the
 * transformed controller key is freed again before returning.
 */
static int nvme_auth_dhchap_setup_ctrl_response(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvme_auth_hmac_ctx hmac;
	struct nvme_dhchap_key *transformed_key;
	u8 buf[4], *challenge = chap->c2;
	int ret;

	transformed_key = nvme_auth_transform_key(ctrl->ctrl_key,
				ctrl->opts->subsysnqn);
	if (IS_ERR(transformed_key)) {
		ret = PTR_ERR(transformed_key);
		return ret;
	}

	ret = nvme_auth_hmac_init(&hmac, chap->hash_id, transformed_key->key,
				  transformed_key->len);
	if (ret) {
		dev_warn(ctrl->device, "qid %d: failed to init hmac, error %d\n",
			 chap->qid, ret);
		goto out;
	}

	/* With a DH group the challenge is augmented by the session key */
	if (chap->dh_tfm) {
		challenge = kmalloc(chap->hash_len, GFP_KERNEL);
		if (!challenge) {
			ret = -ENOMEM;
			goto out;
		}
		ret = nvme_auth_augmented_challenge(chap->hash_id,
						    chap->sess_key,
						    chap->sess_key_len,
						    chap->c2, challenge,
						    chap->hash_len);
		if (ret)
			goto out;
	}
	dev_dbg(ctrl->device, "%s: qid %d ctrl response seq %u transaction %d\n",
		__func__, chap->qid, chap->s2, chap->transaction);
	dev_dbg(ctrl->device, "%s: qid %d challenge %*ph\n",
		__func__, chap->qid, (int)chap->hash_len, challenge);
	dev_dbg(ctrl->device, "%s: qid %d subsysnqn %s\n",
		__func__, chap->qid, ctrl->opts->subsysnqn);
	dev_dbg(ctrl->device, "%s: qid %d hostnqn %s\n",
		__func__, chap->qid, ctrl->opts->host->nqn);

	nvme_auth_hmac_update(&hmac, challenge, chap->hash_len);

	put_unaligned_le32(chap->s2, buf);
	nvme_auth_hmac_update(&hmac, buf, 4);

	put_unaligned_le16(chap->transaction, buf);
	nvme_auth_hmac_update(&hmac, buf, 2);

	/* NUL byte, tag, then the two NQNs separated by a NUL byte */
	memset(buf, 0, 4);
	nvme_auth_hmac_update(&hmac, buf, 1);
	nvme_auth_hmac_update(&hmac, "Controller", 10);
	nvme_auth_hmac_update(&hmac, ctrl->opts->subsysnqn,
			      strlen(ctrl->opts->subsysnqn));
	nvme_auth_hmac_update(&hmac, buf, 1);
	nvme_auth_hmac_update(&hmac, ctrl->opts->host->nqn,
			      strlen(ctrl->opts->host->nqn));
	nvme_auth_hmac_final(&hmac, chap->response);
	ret = 0;
out:
	/* Free the augmented challenge, if one was allocated */
	if (challenge != chap->c2)
		kfree(challenge);
	memzero_explicit(&hmac, sizeof(hmac));
	nvme_auth_free_key(transformed_key);
	return ret;
}
552 
/*
 * Run the DH exchange: generate an ephemeral private/public key pair
 * (unless the host key from a previous run can be reused) and derive
 * the shared session key from the controller's public value.
 *
 * Returns 0 on success or a negative errno with chap->status set.
 */
static int nvme_auth_dhchap_exponential(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	int ret;

	if (chap->host_key && chap->host_key_len) {
		dev_dbg(ctrl->device,
			"qid %d: reusing host key\n", chap->qid);
		goto gen_sesskey;
	}
	ret = nvme_auth_gen_privkey(chap->dh_tfm, chap->dhgroup_id);
	if (ret < 0) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return ret;
	}

	/* Public value size is bounded by the kpp transform */
	chap->host_key_len = crypto_kpp_maxsize(chap->dh_tfm);

	chap->host_key = kzalloc(chap->host_key_len, GFP_KERNEL);
	if (!chap->host_key) {
		chap->host_key_len = 0;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return -ENOMEM;
	}
	ret = nvme_auth_gen_pubkey(chap->dh_tfm,
				   chap->host_key, chap->host_key_len);
	if (ret) {
		dev_dbg(ctrl->device,
			"failed to generate public key, error %d\n", ret);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return ret;
	}

gen_sesskey:
	chap->sess_key_len = chap->host_key_len;
	chap->sess_key = kmalloc(chap->sess_key_len, GFP_KERNEL);
	if (!chap->sess_key) {
		chap->sess_key_len = 0;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return -ENOMEM;
	}

	ret = nvme_auth_gen_shared_secret(chap->dh_tfm,
					  chap->ctrl_key, chap->ctrl_key_len,
					  chap->sess_key, chap->sess_key_len);
	if (ret) {
		dev_dbg(ctrl->device,
			"failed to generate shared secret, error %d\n", ret);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return ret;
	}
	dev_dbg(ctrl->device, "shared secret %*ph\n",
		(int)chap->sess_key_len, chap->sess_key);
	return 0;
}
608 
/*
 * Scrub all per-negotiation state.  Key material is released with
 * kfree_sensitive() so secrets do not linger in freed memory; the DH
 * transform is deliberately kept so that it can be reused by
 * nvme_auth_process_dhchap_challenge() on re-authentication.
 */
static void nvme_auth_reset_dhchap(struct nvme_dhchap_queue_context *chap)
{
	nvme_auth_free_key(chap->transformed_key);
	chap->transformed_key = NULL;
	kfree_sensitive(chap->host_key);
	chap->host_key = NULL;
	chap->host_key_len = 0;
	kfree_sensitive(chap->ctrl_key);
	chap->ctrl_key = NULL;
	chap->ctrl_key_len = 0;
	kfree_sensitive(chap->sess_key);
	chap->sess_key = NULL;
	chap->sess_key_len = 0;
	chap->status = 0;
	chap->error = 0;
	chap->s1 = 0;
	chap->s2 = 0;
	chap->bi_directional = false;
	chap->transaction = 0;
	memset(chap->c1, 0, sizeof(chap->c1));
	memset(chap->c2, 0, sizeof(chap->c2));
	/* return the negotiation buffer to the shared pool */
	mempool_free(chap->buf, nvme_chap_buf_pool);
	chap->buf = NULL;
}
633 
/*
 * Final teardown of a queue context: reset it and also release the
 * DH transform that nvme_auth_reset_dhchap() keeps around for reuse.
 */
static void nvme_auth_free_dhchap(struct nvme_dhchap_queue_context *chap)
{
	nvme_auth_reset_dhchap(chap);
	chap->authenticated = false;
	if (chap->dh_tfm)
		crypto_free_kpp(chap->dh_tfm);
}
641 
642 void nvme_auth_revoke_tls_key(struct nvme_ctrl *ctrl)
643 {
644 	dev_dbg(ctrl->device, "Wipe generated TLS PSK %08x\n",
645 		key_serial(ctrl->opts->tls_key));
646 	key_revoke(ctrl->opts->tls_key);
647 	key_put(ctrl->opts->tls_key);
648 	ctrl->opts->tls_key = NULL;
649 }
650 EXPORT_SYMBOL_GPL(nvme_auth_revoke_tls_key);
651 
/*
 * Secure channel concatenation (admin queue only): derive a TLS PSK
 * from the negotiated DH session key and both challenges, insert it
 * into the keyring, and replace any previously generated TLS key.
 *
 * Returns 0 on success or a negative errno.
 */
static int nvme_auth_secure_concat(struct nvme_ctrl *ctrl,
				   struct nvme_dhchap_queue_context *chap)
{
	u8 *psk, *tls_psk;
	char *digest;
	struct key *tls_key;
	size_t psk_len;
	int ret = 0;

	if (!chap->sess_key) {
		dev_warn(ctrl->device,
			 "%s: qid %d no session key negotiated\n",
			 __func__, chap->qid);
		return -ENOKEY;
	}

	if (chap->qid) {
		dev_warn(ctrl->device,
			 "qid %d: secure concatenation not supported on I/O queues\n",
			 chap->qid);
		return -EINVAL;
	}
	ret = nvme_auth_generate_psk(chap->hash_id, chap->sess_key,
				     chap->sess_key_len,
				     chap->c1, chap->c2,
				     chap->hash_len, &psk, &psk_len);
	if (ret) {
		dev_warn(ctrl->device,
			 "%s: qid %d failed to generate PSK, error %d\n",
			 __func__, chap->qid, ret);
		return ret;
	}
	dev_dbg(ctrl->device,
		  "%s: generated psk %*ph\n", __func__, (int)psk_len, psk);

	ret = nvme_auth_generate_digest(chap->hash_id, psk, psk_len,
					ctrl->opts->subsysnqn,
					ctrl->opts->host->nqn, &digest);
	if (ret) {
		dev_warn(ctrl->device,
			 "%s: qid %d failed to generate digest, error %d\n",
			 __func__, chap->qid, ret);
		goto out_free_psk;
	}
	dev_dbg(ctrl->device, "%s: generated digest %s\n",
		 __func__, digest);
	ret = nvme_auth_derive_tls_psk(chap->hash_id, psk, psk_len,
				       digest, &tls_psk);
	if (ret) {
		dev_warn(ctrl->device,
			 "%s: qid %d failed to derive TLS psk, error %d\n",
			 __func__, chap->qid, ret);
		goto out_free_digest;
	}

	tls_key = nvme_tls_psk_refresh(ctrl->opts->keyring,
				       ctrl->opts->host->nqn,
				       ctrl->opts->subsysnqn, chap->hash_id,
				       tls_psk, psk_len, digest);
	if (IS_ERR(tls_key)) {
		ret = PTR_ERR(tls_key);
		dev_warn(ctrl->device,
			 "%s: qid %d failed to insert generated key, error %d\n",
			 __func__, chap->qid, ret);
		/* fall through: the old key is still revoked below and
		 * ctrl->opts->tls_key ends up NULL */
		tls_key = NULL;
	}
	kfree_sensitive(tls_psk);
	if (ctrl->opts->tls_key)
		nvme_auth_revoke_tls_key(ctrl);
	ctrl->opts->tls_key = tls_key;
out_free_digest:
	kfree_sensitive(digest);
out_free_psk:
	kfree_sensitive(psk);
	return ret;
}
728 
/*
 * Per-queue authentication state machine, run from nvme_auth_wq.
 * Walks the DH-HMAC-CHAP exchange (negotiate, challenge, reply,
 * success1, optional success2) and records the outcome in chap->error
 * for nvme_auth_wait().  On protocol failures an AUTH_Failure2 message
 * is sent to the controller before returning.
 */
static void nvme_queue_auth_work(struct work_struct *work)
{
	struct nvme_dhchap_queue_context *chap =
		container_of(work, struct nvme_dhchap_queue_context, auth_work);
	struct nvme_ctrl *ctrl = chap->ctrl;
	size_t tl;
	int ret = 0;

	/*
	 * Allocate a large enough buffer for the entire negotiation:
	 * 4k is enough to ffdhe8192.
	 */
	chap->buf = mempool_alloc(nvme_chap_buf_pool, GFP_KERNEL);
	if (!chap->buf) {
		chap->error = -ENOMEM;
		return;
	}

	chap->transaction = ctrl->transaction++;

	/* DH-HMAC-CHAP Step 1: send negotiate */
	dev_dbg(ctrl->device, "%s: qid %d send negotiate\n",
		__func__, chap->qid);
	ret = nvme_auth_set_dhchap_negotiate_data(ctrl, chap);
	if (ret < 0) {
		chap->error = ret;
		return;
	}
	tl = ret;
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
	if (ret) {
		chap->error = ret;
		return;
	}

	/* DH-HMAC-CHAP Step 2: receive challenge */
	dev_dbg(ctrl->device, "%s: qid %d receive challenge\n",
		__func__, chap->qid);

	memset(chap->buf, 0, CHAP_BUF_SIZE);
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, CHAP_BUF_SIZE,
			       false);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid %d failed to receive challenge, %s %d\n",
			 chap->qid, ret < 0 ? "error" : "nvme status", ret);
		chap->error = ret;
		return;
	}
	ret = nvme_auth_receive_validate(ctrl, chap->qid, chap->buf, chap->transaction,
					 NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE);
	if (ret) {
		chap->status = ret;
		chap->error = -EKEYREJECTED;
		return;
	}

	ret = nvme_auth_process_dhchap_challenge(ctrl, chap);
	if (ret) {
		/* Invalid challenge parameters */
		chap->error = ret;
		goto fail2;
	}

	/* A controller public value implies a DH group was negotiated */
	if (chap->ctrl_key_len) {
		dev_dbg(ctrl->device,
			"%s: qid %d DH exponential\n",
			__func__, chap->qid);
		ret = nvme_auth_dhchap_exponential(ctrl, chap);
		if (ret) {
			chap->error = ret;
			goto fail2;
		}
	}

	dev_dbg(ctrl->device, "%s: qid %d host response\n",
		__func__, chap->qid);
	/* dhchap_auth_mutex serializes access to the controller keys */
	mutex_lock(&ctrl->dhchap_auth_mutex);
	ret = nvme_auth_dhchap_setup_host_response(ctrl, chap);
	mutex_unlock(&ctrl->dhchap_auth_mutex);
	if (ret) {
		chap->error = ret;
		goto fail2;
	}

	/* DH-HMAC-CHAP Step 3: send reply */
	dev_dbg(ctrl->device, "%s: qid %d send reply\n",
		__func__, chap->qid);
	ret = nvme_auth_set_dhchap_reply_data(ctrl, chap);
	if (ret < 0) {
		chap->error = ret;
		goto fail2;
	}

	tl = ret;
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
	if (ret) {
		chap->error = ret;
		goto fail2;
	}

	/* DH-HMAC-CHAP Step 4: receive success1 */
	dev_dbg(ctrl->device, "%s: qid %d receive success1\n",
		__func__, chap->qid);

	memset(chap->buf, 0, CHAP_BUF_SIZE);
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, CHAP_BUF_SIZE,
			       false);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid %d failed to receive success1, %s %d\n",
			 chap->qid, ret < 0 ? "error" : "nvme status", ret);
		chap->error = ret;
		return;
	}
	ret = nvme_auth_receive_validate(ctrl, chap->qid,
					 chap->buf, chap->transaction,
					 NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1);
	if (ret) {
		chap->status = ret;
		chap->error = -EKEYREJECTED;
		return;
	}

	mutex_lock(&ctrl->dhchap_auth_mutex);
	if (ctrl->ctrl_key) {
		/* Bidirectional: precompute the expected ctrl response */
		dev_dbg(ctrl->device,
			"%s: qid %d controller response\n",
			__func__, chap->qid);
		ret = nvme_auth_dhchap_setup_ctrl_response(ctrl, chap);
		if (ret) {
			mutex_unlock(&ctrl->dhchap_auth_mutex);
			chap->error = ret;
			goto fail2;
		}
	}
	mutex_unlock(&ctrl->dhchap_auth_mutex);

	ret = nvme_auth_process_dhchap_success1(ctrl, chap);
	if (ret) {
		/* Controller authentication failed */
		chap->error = -EKEYREJECTED;
		goto fail2;
	}

	if (chap->bi_directional) {
		/* DH-HMAC-CHAP Step 5: send success2 */
		dev_dbg(ctrl->device, "%s: qid %d send success2\n",
			__func__, chap->qid);
		tl = nvme_auth_set_dhchap_success2_data(ctrl, chap);
		ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
		if (ret)
			chap->error = ret;
	}
	if (!ret) {
		chap->error = 0;
		chap->authenticated = true;
		/* For secure concatenation, now derive the TLS PSK */
		if (ctrl->opts->concat &&
		    (ret = nvme_auth_secure_concat(ctrl, chap))) {
			dev_warn(ctrl->device,
				 "%s: qid %d failed to enable secure concatenation\n",
				 __func__, chap->qid);
			chap->error = ret;
			chap->authenticated = false;
		}
		return;
	}

fail2:
	if (chap->status == 0)
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
	dev_dbg(ctrl->device, "%s: qid %d send failure2, status %x\n",
		__func__, chap->qid, chap->status);
	tl = nvme_auth_set_dhchap_failure2_data(ctrl, chap);
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
	/*
	 * only update error if send failure2 failed and no other
	 * error had been set during authentication.
	 */
	if (ret && !chap->error)
		chap->error = ret;
}
911 
912 int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
913 {
914 	struct nvme_dhchap_queue_context *chap;
915 
916 	if (!ctrl->host_key) {
917 		dev_warn(ctrl->device, "qid %d: no key\n", qid);
918 		return -ENOKEY;
919 	}
920 
921 	if (ctrl->opts->dhchap_ctrl_secret && !ctrl->ctrl_key) {
922 		dev_warn(ctrl->device, "qid %d: invalid ctrl key\n", qid);
923 		return -ENOKEY;
924 	}
925 
926 	chap = &ctrl->dhchap_ctxs[qid];
927 	cancel_work_sync(&chap->auth_work);
928 	queue_work(nvme_auth_wq, &chap->auth_work);
929 	return 0;
930 }
931 EXPORT_SYMBOL_GPL(nvme_auth_negotiate);
932 
933 int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
934 {
935 	struct nvme_dhchap_queue_context *chap;
936 	int ret;
937 
938 	chap = &ctrl->dhchap_ctxs[qid];
939 	flush_work(&chap->auth_work);
940 	ret = chap->error;
941 	/* clear sensitive info */
942 	nvme_auth_reset_dhchap(chap);
943 	return ret;
944 }
945 EXPORT_SYMBOL_GPL(nvme_auth_wait);
946 
/*
 * Controller-wide re-authentication: authenticate the admin queue
 * first, then re-run authentication on every I/O queue that had been
 * authenticated before.
 */
static void nvme_ctrl_auth_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, dhchap_auth_work);
	int ret, q;

	/*
	 * If the ctrl is not connected, bail as reconnect will handle
	 * authentication.
	 */
	if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
		return;

	/* Authenticate admin queue first */
	ret = nvme_auth_negotiate(ctrl, 0);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid 0: error %d setting up authentication\n", ret);
		return;
	}
	ret = nvme_auth_wait(ctrl, 0);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid 0: authentication failed\n");
		return;
	}
	/*
	 * Only run authentication on the admin queue for secure concatenation.
	 */
	if (ctrl->opts->concat)
		return;

	/* First pass: queue the work for all eligible I/O queues ... */
	for (q = 1; q < ctrl->queue_count; q++) {
		struct nvme_dhchap_queue_context *chap =
			&ctrl->dhchap_ctxs[q];
		/*
		 * Skip re-authentication if the queue had
		 * not been authenticated initially.
		 */
		if (!chap->authenticated)
			continue;
		cancel_work_sync(&chap->auth_work);
		queue_work(nvme_auth_wq, &chap->auth_work);
	}

	/*
	 * ... then collect the results.
	 * Failure is a soft-state; credentials remain valid until
	 * the controller terminates the connection.
	 */
	for (q = 1; q < ctrl->queue_count; q++) {
		struct nvme_dhchap_queue_context *chap =
			&ctrl->dhchap_ctxs[q];
		if (!chap->authenticated)
			continue;
		flush_work(&chap->auth_work);
		ret = chap->error;
		nvme_auth_reset_dhchap(chap);
		if (ret)
			dev_warn(ctrl->device,
				 "qid %d: authentication failed\n", q);
	}
}
1009 
/*
 * Parse the configured DH-HMAC-CHAP secrets and allocate one
 * authentication context per possible queue.
 *
 * Returns 0 on success or a negative errno; on failure any parsed
 * keys are released again.
 */
int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
{
	struct nvme_dhchap_queue_context *chap;
	int i, ret;

	mutex_init(&ctrl->dhchap_auth_mutex);
	INIT_WORK(&ctrl->dhchap_auth_work, nvme_ctrl_auth_work);
	/* No fabrics options, hence no secrets to parse */
	if (!ctrl->opts)
		return 0;
	ret = nvme_auth_parse_key(ctrl->opts->dhchap_secret, &ctrl->host_key);
	if (ret)
		return ret;
	ret = nvme_auth_parse_key(ctrl->opts->dhchap_ctrl_secret,
				  &ctrl->ctrl_key);
	if (ret)
		goto err_free_dhchap_secret;

	/* No authentication configured: skip context allocation */
	if (!ctrl->opts->dhchap_secret && !ctrl->opts->dhchap_ctrl_secret)
		return 0;

	ctrl->dhchap_ctxs = kvzalloc_objs(*chap, ctrl_max_dhchaps(ctrl));
	if (!ctrl->dhchap_ctxs) {
		ret = -ENOMEM;
		goto err_free_dhchap_ctrl_secret;
	}

	for (i = 0; i < ctrl_max_dhchaps(ctrl); i++) {
		chap = &ctrl->dhchap_ctxs[i];
		chap->qid = i;
		chap->ctrl = ctrl;
		chap->authenticated = false;
		INIT_WORK(&chap->auth_work, nvme_queue_auth_work);
	}

	return 0;
err_free_dhchap_ctrl_secret:
	nvme_auth_free_key(ctrl->ctrl_key);
	ctrl->ctrl_key = NULL;
err_free_dhchap_secret:
	nvme_auth_free_key(ctrl->host_key);
	ctrl->host_key = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_auth_init_ctrl);
1054 
/*
 * Cancel any in-flight controller-wide re-authentication work.
 */
void nvme_auth_stop(struct nvme_ctrl *ctrl)
{
	cancel_work_sync(&ctrl->dhchap_auth_work);
}
EXPORT_SYMBOL_GPL(nvme_auth_stop);
1060 
1061 void nvme_auth_free(struct nvme_ctrl *ctrl)
1062 {
1063 	int i;
1064 
1065 	if (ctrl->dhchap_ctxs) {
1066 		for (i = 0; i < ctrl_max_dhchaps(ctrl); i++)
1067 			nvme_auth_free_dhchap(&ctrl->dhchap_ctxs[i]);
1068 		kvfree(ctrl->dhchap_ctxs);
1069 	}
1070 	if (ctrl->host_key) {
1071 		nvme_auth_free_key(ctrl->host_key);
1072 		ctrl->host_key = NULL;
1073 	}
1074 	if (ctrl->ctrl_key) {
1075 		nvme_auth_free_key(ctrl->ctrl_key);
1076 		ctrl->ctrl_key = NULL;
1077 	}
1078 }
1079 EXPORT_SYMBOL_GPL(nvme_auth_free);
1080 
1081 int __init nvme_init_auth(void)
1082 {
1083 	nvme_auth_wq = alloc_workqueue("nvme-auth-wq",
1084 			       WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
1085 	if (!nvme_auth_wq)
1086 		return -ENOMEM;
1087 
1088 	nvme_chap_buf_cache = kmem_cache_create("nvme-chap-buf-cache",
1089 				CHAP_BUF_SIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
1090 	if (!nvme_chap_buf_cache)
1091 		goto err_destroy_workqueue;
1092 
1093 	nvme_chap_buf_pool = mempool_create(16, mempool_alloc_slab,
1094 			mempool_free_slab, nvme_chap_buf_cache);
1095 	if (!nvme_chap_buf_pool)
1096 		goto err_destroy_chap_buf_cache;
1097 
1098 	return 0;
1099 err_destroy_chap_buf_cache:
1100 	kmem_cache_destroy(nvme_chap_buf_cache);
1101 err_destroy_workqueue:
1102 	destroy_workqueue(nvme_auth_wq);
1103 	return -ENOMEM;
1104 }
1105 
/*
 * Module exit: tear down the global resources in reverse order of
 * creation (the pool draws from the cache, so it goes first).
 */
void __exit nvme_exit_auth(void)
{
	mempool_destroy(nvme_chap_buf_pool);
	kmem_cache_destroy(nvme_chap_buf_cache);
	destroy_workqueue(nvme_auth_wq);
}
1112