xref: /linux/drivers/nvme/target/fabrics-cmd-auth.c (revision 844d950bb2cb1fc5b8973369de59cbfb7eecd94d)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * NVMe over Fabrics DH-HMAC-CHAP authentication command handling.
4  * Copyright (c) 2020 Hannes Reinecke, SUSE Software Solutions.
5  * All rights reserved.
6  */
7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8 #include <linux/blkdev.h>
9 #include <linux/random.h>
10 #include <linux/nvme-auth.h>
11 #include <crypto/kpp.h>
12 #include "nvmet.h"
13 
14 static void nvmet_auth_expired_work(struct work_struct *work)
15 {
16 	struct nvmet_sq *sq = container_of(to_delayed_work(work),
17 			struct nvmet_sq, auth_expired_work);
18 
19 	pr_debug("%s: ctrl %d qid %d transaction %u expired, resetting\n",
20 		 __func__, sq->ctrl->cntlid, sq->qid, sq->dhchap_tid);
21 	sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
22 	sq->dhchap_tid = -1;
23 }
24 
25 void nvmet_auth_sq_init(struct nvmet_sq *sq)
26 {
27 	/* Initialize in-band authentication */
28 	INIT_DELAYED_WORK(&sq->auth_expired_work, nvmet_auth_expired_work);
29 	sq->authenticated = false;
30 	sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
31 }
32 
33 static u8 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
34 {
35 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
36 	struct nvmf_auth_dhchap_negotiate_data *data = d;
37 	int i, hash_id = 0, fallback_hash_id = 0, dhgid, fallback_dhgid;
38 
39 	pr_debug("%s: ctrl %d qid %d: data sc_d %d napd %d authid %d halen %d dhlen %d\n",
40 		 __func__, ctrl->cntlid, req->sq->qid,
41 		 data->sc_c, data->napd, data->auth_protocol[0].dhchap.authid,
42 		 data->auth_protocol[0].dhchap.halen,
43 		 data->auth_protocol[0].dhchap.dhlen);
44 	req->sq->dhchap_tid = le16_to_cpu(data->t_id);
45 	req->sq->sc_c = data->sc_c;
46 	if (data->sc_c != NVME_AUTH_SECP_NOSC) {
47 		if (!IS_ENABLED(CONFIG_NVME_TARGET_TCP_TLS))
48 			return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
49 		/* Secure concatenation can only be enabled on the admin queue */
50 		if (req->sq->qid)
51 			return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
52 		switch (data->sc_c) {
53 		case NVME_AUTH_SECP_NEWTLSPSK:
54 			if (nvmet_queue_tls_keyid(req->sq))
55 				return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
56 			break;
57 		case NVME_AUTH_SECP_REPLACETLSPSK:
58 			if (!nvmet_queue_tls_keyid(req->sq))
59 				return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
60 			break;
61 		default:
62 			return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
63 		}
64 		ctrl->concat = true;
65 	}
66 
67 	if (data->napd != 1)
68 		return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
69 
70 	if (data->auth_protocol[0].dhchap.authid !=
71 	    NVME_AUTH_DHCHAP_AUTH_ID)
72 		return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
73 
74 	for (i = 0; i < data->auth_protocol[0].dhchap.halen; i++) {
75 		u8 host_hmac_id = data->auth_protocol[0].dhchap.idlist[i];
76 
77 		if (!fallback_hash_id && nvme_auth_hmac_hash_len(host_hmac_id))
78 			fallback_hash_id = host_hmac_id;
79 		if (ctrl->shash_id != host_hmac_id)
80 			continue;
81 		hash_id = ctrl->shash_id;
82 		break;
83 	}
84 	if (hash_id == 0) {
85 		if (fallback_hash_id == 0) {
86 			pr_debug("%s: ctrl %d qid %d: no usable hash found\n",
87 				 __func__, ctrl->cntlid, req->sq->qid);
88 			return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
89 		}
90 		pr_debug("%s: ctrl %d qid %d: no usable hash found, falling back to %s\n",
91 			 __func__, ctrl->cntlid, req->sq->qid,
92 			 nvme_auth_hmac_name(fallback_hash_id));
93 		ctrl->shash_id = fallback_hash_id;
94 	}
95 
96 	dhgid = -1;
97 	fallback_dhgid = -1;
98 	for (i = 0; i < data->auth_protocol[0].dhchap.dhlen; i++) {
99 		int tmp_dhgid = data->auth_protocol[0].dhchap.idlist[i + 30];
100 
101 		if (tmp_dhgid != ctrl->dh_gid) {
102 			dhgid = tmp_dhgid;
103 			break;
104 		}
105 		if (fallback_dhgid < 0) {
106 			const char *kpp = nvme_auth_dhgroup_kpp(tmp_dhgid);
107 
108 			if (crypto_has_kpp(kpp, 0, 0))
109 				fallback_dhgid = tmp_dhgid;
110 		}
111 	}
112 	if (dhgid < 0) {
113 		if (fallback_dhgid < 0) {
114 			pr_debug("%s: ctrl %d qid %d: no usable DH group found\n",
115 				 __func__, ctrl->cntlid, req->sq->qid);
116 			return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
117 		}
118 		pr_debug("%s: ctrl %d qid %d: configured DH group %s not found\n",
119 			 __func__, ctrl->cntlid, req->sq->qid,
120 			 nvme_auth_dhgroup_name(fallback_dhgid));
121 		ctrl->dh_gid = fallback_dhgid;
122 	}
123 	if (ctrl->dh_gid == NVME_AUTH_DHGROUP_NULL && ctrl->concat) {
124 		pr_debug("%s: ctrl %d qid %d: NULL DH group invalid "
125 			 "for secure channel concatenation\n", __func__,
126 			 ctrl->cntlid, req->sq->qid);
127 		return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;
128 	}
129 	pr_debug("%s: ctrl %d qid %d: selected DH group %s (%d)\n",
130 		 __func__, ctrl->cntlid, req->sq->qid,
131 		 nvme_auth_dhgroup_name(ctrl->dh_gid), ctrl->dh_gid);
132 	return 0;
133 }
134 
/*
 * Process a DH-HMAC-CHAP_Reply message: derive the DH session key when
 * a public value is supplied, verify the host's response against the
 * locally computed hash, and, for bidirectional authentication, stash
 * the host challenge (C2) and sequence number (S2) for the subsequent
 * success1 message.
 *
 * Returns 0 on success or a NVME_AUTH_DHCHAP_FAILURE_* reason code.
 */
static u8 nvmet_auth_reply(struct nvmet_req *req, void *d)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_auth_dhchap_reply_data *data = d;
	u16 dhvlen = le16_to_cpu(data->dhvlen);
	u8 *response;

	pr_debug("%s: ctrl %d qid %d: data hl %d cvalid %d dhvlen %u\n",
		 __func__, ctrl->cntlid, req->sq->qid,
		 data->hl, data->cvalid, dhvlen);

	if (dhvlen) {
		/* A DH value is only valid if a DH transform was negotiated */
		if (!ctrl->dh_tfm)
			return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		/*
		 * The host public value follows the response and challenge
		 * fields, each 'hl' bytes long, in rval[].
		 */
		if (nvmet_auth_ctrl_sesskey(req, data->rval + 2 * data->hl,
					    dhvlen) < 0)
			return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
	}

	response = kmalloc(data->hl, GFP_KERNEL);
	if (!response)
		return NVME_AUTH_DHCHAP_FAILURE_FAILED;

	if (!ctrl->host_key) {
		pr_warn("ctrl %d qid %d no host key\n",
			ctrl->cntlid, req->sq->qid);
		kfree(response);
		return NVME_AUTH_DHCHAP_FAILURE_FAILED;
	}
	/* Compute the expected host response into 'response' */
	if (nvmet_auth_host_hash(req, response, data->hl) < 0) {
		pr_debug("ctrl %d qid %d host hash failed\n",
			 ctrl->cntlid, req->sq->qid);
		kfree(response);
		return NVME_AUTH_DHCHAP_FAILURE_FAILED;
	}

	/* Constant content comparison of host response vs expected value */
	if (memcmp(data->rval, response, data->hl)) {
		pr_info("ctrl %d qid %d host response mismatch\n",
			ctrl->cntlid, req->sq->qid);
		pr_debug("ctrl %d qid %d rval %*ph\n",
			 ctrl->cntlid, req->sq->qid, data->hl, data->rval);
		pr_debug("ctrl %d qid %d response %*ph\n",
			 ctrl->cntlid, req->sq->qid, data->hl, response);
		kfree(response);
		return NVME_AUTH_DHCHAP_FAILURE_FAILED;
	}
	kfree(response);
	pr_debug("%s: ctrl %d qid %d host authenticated\n",
		 __func__, ctrl->cntlid, req->sq->qid);
	/* Secure concatenation requires a host challenge to derive the PSK */
	if (!data->cvalid && ctrl->concat) {
		pr_debug("%s: ctrl %d qid %d invalid challenge\n",
			 __func__, ctrl->cntlid, req->sq->qid);
		return NVME_AUTH_DHCHAP_FAILURE_FAILED;
	}
	req->sq->dhchap_s2 = le32_to_cpu(data->seqnum);
	if (data->cvalid) {
		/* Host challenge C2 follows the response in rval[] */
		req->sq->dhchap_c2 = kmemdup(data->rval + data->hl, data->hl,
					     GFP_KERNEL);
		if (!req->sq->dhchap_c2)
			return NVME_AUTH_DHCHAP_FAILURE_FAILED;

		pr_debug("%s: ctrl %d qid %d challenge %*ph\n",
			 __func__, ctrl->cntlid, req->sq->qid, data->hl,
			 req->sq->dhchap_c2);
	}
	/*
	 * NVMe Base Spec 2.2 section 8.3.4.5.4: DH-HMAC-CHAP_Reply message
	 * Sequence Number (SEQNUM): [ .. ]
	 * The value 0h is used to indicate that bidirectional authentication
	 * is not performed, but a challenge value C2 is carried in order to
	 * generate a pre-shared key (PSK) for subsequent establishment of a
	 * secure channel.
	 */
	if (req->sq->dhchap_s2 == 0) {
		if (ctrl->concat)
			nvmet_auth_insert_psk(req->sq);
		req->sq->authenticated = true;
		kfree(req->sq->dhchap_c2);
		req->sq->dhchap_c2 = NULL;
	} else if (!data->cvalid)
		req->sq->authenticated = true;

	return 0;
}
219 
220 static u8 nvmet_auth_failure2(void *d)
221 {
222 	struct nvmf_auth_dhchap_failure_data *data = d;
223 
224 	return data->rescode_exp;
225 }
226 
227 u32 nvmet_auth_send_data_len(struct nvmet_req *req)
228 {
229 	return le32_to_cpu(req->cmd->auth_send.tl);
230 }
231 
/*
 * Handle an NVMe-oF AUTH_Send command: validate the SECP/SPSP/TL
 * fields, copy the payload from the SGL, and advance the per-queue
 * DH-HMAC-CHAP state machine according to the message carried in the
 * payload.  Non-final states (re)arm the authentication expiry timer;
 * final states tear down the per-queue authentication data, and a
 * failure2 message additionally triggers a fatal controller error.
 */
void nvmet_execute_auth_send(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_auth_dhchap_success2_data *data;
	void *d;
	u32 tl;
	u16 status = 0;
	u8 dhchap_status;

	/* Only the DH-HMAC-CHAP security protocol is supported */
	if (req->cmd->auth_send.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_send_command, secp);
		goto done;
	}
	if (req->cmd->auth_send.spsp0 != 0x01) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_send_command, spsp0);
		goto done;
	}
	if (req->cmd->auth_send.spsp1 != 0x01) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_send_command, spsp1);
		goto done;
	}
	tl = nvmet_auth_send_data_len(req);
	if (!tl) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_send_command, tl);
		goto done;
	}
	/* nvmet_check_transfer_len() completes the request on mismatch */
	if (!nvmet_check_transfer_len(req, tl)) {
		pr_debug("%s: transfer length mismatch (%u)\n", __func__, tl);
		return;
	}

	d = kmalloc(tl, GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto done;
	}

	status = nvmet_copy_from_sgl(req, 0, d, tl);
	if (status)
		goto done_kfree;

	/* All message types share the leading auth_type/auth_id/t_id layout */
	data = d;
	pr_debug("%s: ctrl %d qid %d type %d id %d step %x\n", __func__,
		 ctrl->cntlid, req->sq->qid, data->auth_type, data->auth_id,
		 req->sq->dhchap_step);
	if (data->auth_type != NVME_AUTH_COMMON_MESSAGES &&
	    data->auth_type != NVME_AUTH_DHCHAP_MESSAGES)
		goto done_failure1;
	if (data->auth_type == NVME_AUTH_COMMON_MESSAGES) {
		if (data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE) {
			/* Restart negotiation */
			pr_debug("%s: ctrl %d qid %d reset negotiation\n",
				 __func__, ctrl->cntlid, req->sq->qid);
			if (!req->sq->qid) {
				/* Re-authentication on the admin queue needs fresh setup */
				dhchap_status = nvmet_setup_auth(ctrl, req->sq);
				if (dhchap_status) {
					pr_err("ctrl %d qid 0 failed to setup re-authentication\n",
					       ctrl->cntlid);
					req->sq->dhchap_status = dhchap_status;
					req->sq->dhchap_step =
						NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
					goto done_kfree;
				}
			}
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
		} else if (data->auth_id != req->sq->dhchap_step)
			goto done_failure1;
		/* Validate negotiation parameters */
		dhchap_status = nvmet_auth_negotiate(req, d);
		if (dhchap_status == 0)
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE;
		else {
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
			req->sq->dhchap_status = dhchap_status;
		}
		goto done_kfree;
	}
	/* DH-HMAC-CHAP messages must match the expected step and transaction */
	if (data->auth_id != req->sq->dhchap_step) {
		pr_debug("%s: ctrl %d qid %d step mismatch (%d != %d)\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 data->auth_id, req->sq->dhchap_step);
		goto done_failure1;
	}
	if (le16_to_cpu(data->t_id) != req->sq->dhchap_tid) {
		pr_debug("%s: ctrl %d qid %d invalid transaction %d (expected %d)\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 le16_to_cpu(data->t_id),
			 req->sq->dhchap_tid);
		req->sq->dhchap_step =
			NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
		req->sq->dhchap_status =
			NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		goto done_kfree;
	}

	switch (data->auth_id) {
	case NVME_AUTH_DHCHAP_MESSAGE_REPLY:
		/* Verify the host response; on success we owe a success1 */
		dhchap_status = nvmet_auth_reply(req, d);
		if (dhchap_status == 0)
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1;
		else {
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
			req->sq->dhchap_status = dhchap_status;
		}
		goto done_kfree;
	case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2:
		/* Host confirmed our success1; authentication complete */
		if (ctrl->concat)
			nvmet_auth_insert_psk(req->sq);
		req->sq->authenticated = true;
		pr_debug("%s: ctrl %d qid %d ctrl authenticated\n",
			 __func__, ctrl->cntlid, req->sq->qid);
		goto done_kfree;
	case NVME_AUTH_DHCHAP_MESSAGE_FAILURE2:
		/* Host rejected our success1; record its reason code */
		dhchap_status = nvmet_auth_failure2(d);
		if (dhchap_status) {
			pr_warn("ctrl %d qid %d: authentication failed (%d)\n",
				ctrl->cntlid, req->sq->qid, dhchap_status);
			req->sq->dhchap_status = dhchap_status;
			req->sq->authenticated = false;
		}
		goto done_kfree;
	default:
		req->sq->dhchap_status =
			NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
		req->sq->dhchap_step =
			NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
		req->sq->authenticated = false;
		goto done_kfree;
	}
done_failure1:
	req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
	req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;

done_kfree:
	kfree(d);
done:
	pr_debug("%s: ctrl %d qid %d dhchap status %x step %x\n", __func__,
		 ctrl->cntlid, req->sq->qid,
		 req->sq->dhchap_status, req->sq->dhchap_step);
	if (status)
		pr_debug("%s: ctrl %d qid %d nvme status %x error loc %d\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 status, req->error_loc);
	if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 &&
	    req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
		/* Transaction still in flight: (re)arm the expiry timer */
		unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120;

		mod_delayed_work(system_wq, &req->sq->auth_expired_work,
				 auth_expire_secs * HZ);
		goto complete;
	}
	/* Final states, clear up variables */
	nvmet_auth_sq_free(req->sq);
	if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2)
		nvmet_ctrl_fatal_error(ctrl);

complete:
	nvmet_req_complete(req, status);
}
404 
405 static int nvmet_auth_challenge(struct nvmet_req *req, void *d, int al)
406 {
407 	struct nvmf_auth_dhchap_challenge_data *data = d;
408 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
409 	int ret = 0;
410 	int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id);
411 	int data_size = sizeof(*d) + hash_len;
412 
413 	if (ctrl->dh_tfm)
414 		data_size += ctrl->dh_keysize;
415 	if (al < data_size) {
416 		pr_debug("%s: buffer too small (al %d need %d)\n", __func__,
417 			 al, data_size);
418 		return -EINVAL;
419 	}
420 	memset(data, 0, data_size);
421 	req->sq->dhchap_s1 = nvme_auth_get_seqnum();
422 	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
423 	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE;
424 	data->t_id = cpu_to_le16(req->sq->dhchap_tid);
425 	data->hashid = ctrl->shash_id;
426 	data->hl = hash_len;
427 	data->seqnum = cpu_to_le32(req->sq->dhchap_s1);
428 	req->sq->dhchap_c1 = kmalloc(data->hl, GFP_KERNEL);
429 	if (!req->sq->dhchap_c1)
430 		return -ENOMEM;
431 	get_random_bytes(req->sq->dhchap_c1, data->hl);
432 	memcpy(data->cval, req->sq->dhchap_c1, data->hl);
433 	if (ctrl->dh_tfm) {
434 		data->dhgid = ctrl->dh_gid;
435 		data->dhvlen = cpu_to_le16(ctrl->dh_keysize);
436 		ret = nvmet_auth_ctrl_exponential(req, data->cval + data->hl,
437 						  ctrl->dh_keysize);
438 	}
439 	pr_debug("%s: ctrl %d qid %d seq %d transaction %d hl %d dhvlen %zu\n",
440 		 __func__, ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1,
441 		 req->sq->dhchap_tid, data->hl, ctrl->dh_keysize);
442 	return ret;
443 }
444 
445 static int nvmet_auth_success1(struct nvmet_req *req, void *d, int al)
446 {
447 	struct nvmf_auth_dhchap_success1_data *data = d;
448 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
449 	int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id);
450 
451 	WARN_ON(al < sizeof(*data));
452 	memset(data, 0, sizeof(*data));
453 	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
454 	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1;
455 	data->t_id = cpu_to_le16(req->sq->dhchap_tid);
456 	data->hl = hash_len;
457 	if (req->sq->dhchap_c2) {
458 		if (!ctrl->ctrl_key) {
459 			pr_warn("ctrl %d qid %d no ctrl key\n",
460 				ctrl->cntlid, req->sq->qid);
461 			return NVME_AUTH_DHCHAP_FAILURE_FAILED;
462 		}
463 		if (nvmet_auth_ctrl_hash(req, data->rval, data->hl))
464 			return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
465 		data->rvalid = 1;
466 		pr_debug("ctrl %d qid %d response %*ph\n",
467 			 ctrl->cntlid, req->sq->qid, data->hl, data->rval);
468 	}
469 	return 0;
470 }
471 
472 static void nvmet_auth_failure1(struct nvmet_req *req, void *d, int al)
473 {
474 	struct nvmf_auth_dhchap_failure_data *data = d;
475 
476 	WARN_ON(al < sizeof(*data));
477 	data->auth_type = NVME_AUTH_COMMON_MESSAGES;
478 	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
479 	data->t_id = cpu_to_le16(req->sq->dhchap_tid);
480 	data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
481 	data->rescode_exp = req->sq->dhchap_status;
482 }
483 
484 u32 nvmet_auth_receive_data_len(struct nvmet_req *req)
485 {
486 	return le32_to_cpu(req->cmd->auth_receive.al);
487 }
488 
/*
 * Handle an NVMe-oF AUTH_Receive command: validate the SECP/SPSP/AL
 * fields, build the response message that corresponds to the queue's
 * current DH-HMAC-CHAP step (challenge, success1 or failure1) and copy
 * it back to the host via the SGL.  Reaching a final step tears down
 * the per-queue authentication state; failure1 is additionally fatal
 * for the controller.
 */
void nvmet_execute_auth_receive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	void *d;
	u32 al;
	u16 status = 0;

	/* Only the DH-HMAC-CHAP security protocol is supported */
	if (req->cmd->auth_receive.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_receive_command, secp);
		goto done;
	}
	if (req->cmd->auth_receive.spsp0 != 0x01) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_receive_command, spsp0);
		goto done;
	}
	if (req->cmd->auth_receive.spsp1 != 0x01) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_receive_command, spsp1);
		goto done;
	}
	al = nvmet_auth_receive_data_len(req);
	if (!al) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_receive_command, al);
		goto done;
	}
	/* nvmet_check_transfer_len() completes the request on mismatch */
	if (!nvmet_check_transfer_len(req, al)) {
		pr_debug("%s: transfer length mismatch (%u)\n", __func__, al);
		return;
	}

	d = kmalloc(al, GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto done;
	}
	pr_debug("%s: ctrl %d qid %d step %x\n", __func__,
		 ctrl->cntlid, req->sq->qid, req->sq->dhchap_step);
	switch (req->sq->dhchap_step) {
	case NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE:
		if (nvmet_auth_challenge(req, d, al) < 0) {
			pr_warn("ctrl %d qid %d: challenge error (%d)\n",
				ctrl->cntlid, req->sq->qid, status);
			status = NVME_SC_INTERNAL;
			break;
		}
		/* Challenge sent; next expected message is the host reply */
		req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
		break;
	case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1:
		status = nvmet_auth_success1(req, d, al);
		if (status) {
			/* Downgrade the message to a failure1 in-place */
			req->sq->dhchap_status = status;
			req->sq->authenticated = false;
			nvmet_auth_failure1(req, d, al);
			pr_warn("ctrl %d qid %d: success1 status (%x)\n",
				ctrl->cntlid, req->sq->qid,
				req->sq->dhchap_status);
			break;
		}
		req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
		break;
	case NVME_AUTH_DHCHAP_MESSAGE_FAILURE1:
		req->sq->authenticated = false;
		nvmet_auth_failure1(req, d, al);
		pr_warn("ctrl %d qid %d failure1 (%x)\n",
			ctrl->cntlid, req->sq->qid, req->sq->dhchap_status);
		break;
	default:
		/* Unexpected step: report a generic failure1 to the host */
		pr_warn("ctrl %d qid %d unhandled step (%d)\n",
			ctrl->cntlid, req->sq->qid, req->sq->dhchap_step);
		req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
		req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		nvmet_auth_failure1(req, d, al);
		status = 0;
		break;
	}

	status = nvmet_copy_to_sgl(req, 0, d, al);
	kfree(d);
done:
	/* Final steps: release the per-queue authentication state */
	if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2)
		nvmet_auth_sq_free(req->sq);
	else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
		nvmet_auth_sq_free(req->sq);
		nvmet_ctrl_fatal_error(ctrl);
	}
	nvmet_req_complete(req, status);
}
583