// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics DH-HMAC-CHAP authentication command handling.
 * Copyright (c) 2020 Hannes Reinecke, SUSE Software Solutions.
 * All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/nvme-auth.h>
#include <crypto/hash.h>
#include <crypto/kpp.h>
#include "nvmet.h"

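/*
 * Timeout handler for a stalled authentication transaction: reset the
 * negotiation state so the host has to restart authentication.
 */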
static void nvmet_auth_expired_work(struct work_struct *work)
{
	struct nvmet_sq *sq = container_of(to_delayed_work(work),
			struct nvmet_sq, auth_expired_work);

	pr_debug("%s: ctrl %d qid %d transaction %u expired, resetting\n",
		 __func__, sq->ctrl->cntlid, sq->qid, sq->dhchap_tid);
	sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
	sq->dhchap_tid = -1;
}

void nvmet_auth_sq_init(struct nvmet_sq *sq)
{
	/* Initialize in-band authentication */
	INIT_DELAYED_WORK(&sq->auth_expired_work, nvmet_auth_expired_work);
	sq->authenticated = false;
	sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
}

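/*
 * Handle an AUTH_Negotiate message: validate the negotiation parameters
 * and select an HMAC hash and DH group usable by both host and
 * controller, falling back to a host-proposed algorithm where possible.
 * Returns 0 on success or a DH-HMAC-CHAP failure reason code.
 */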
static u8 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_auth_dhchap_negotiate_data *data = d;
	int i, hash_id = 0, fallback_hash_id = 0, dhgid, fallback_dhgid;

	pr_debug("%s: ctrl %d qid %d: data sc_d %d napd %d authid %d halen %d dhlen %d\n",
		 __func__, ctrl->cntlid, req->sq->qid,
		 data->sc_c, data->napd, data->auth_protocol[0].dhchap.authid,
		 data->auth_protocol[0].dhchap.halen,
		 data->auth_protocol[0].dhchap.dhlen);
	req->sq->dhchap_tid = le16_to_cpu(data->t_id);
	if (data->sc_c)
		return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;

	if (data->napd != 1)
		return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;

	if (data->auth_protocol[0].dhchap.authid !=
	    NVME_AUTH_DHCHAP_AUTH_ID)
		return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;

	for (i = 0; i < data->auth_protocol[0].dhchap.halen; i++) {
		u8 host_hmac_id = data->auth_protocol[0].dhchap.idlist[i];

		if (!fallback_hash_id &&
		    crypto_has_shash(nvme_auth_hmac_name(host_hmac_id), 0, 0))
			fallback_hash_id = host_hmac_id;
		if (ctrl->shash_id != host_hmac_id)
			continue;
		hash_id = ctrl->shash_id;
		break;
	}
	if (hash_id == 0) {
		if (fallback_hash_id == 0) {
			pr_debug("%s: ctrl %d qid %d: no usable hash found\n",
				 __func__, ctrl->cntlid, req->sq->qid);
			return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		}
		pr_debug("%s: ctrl %d qid %d: no usable hash found, falling back to %s\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 nvme_auth_hmac_name(fallback_hash_id));
		ctrl->shash_id = fallback_hash_id;
	}

	dhgid = -1;
	fallback_dhgid = -1;
	for (i = 0; i < data->auth_protocol[0].dhchap.dhlen; i++) {
		int tmp_dhgid = data->auth_protocol[0].dhchap.idlist[i + 30];

		if (tmp_dhgid == ctrl->dh_gid) {
			dhgid = tmp_dhgid;
			break;
		}
		if (fallback_dhgid < 0) {
			const char *kpp = nvme_auth_dhgroup_kpp(tmp_dhgid);

			if (crypto_has_kpp(kpp, 0, 0))
				fallback_dhgid = tmp_dhgid;
		}
	}
	if (dhgid < 0) {
		if (fallback_dhgid < 0) {
			pr_debug("%s: ctrl %d qid %d: no usable DH group found\n",
				 __func__, ctrl->cntlid, req->sq->qid);
			return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
		}
		pr_debug("%s: ctrl %d qid %d: configured DH group %s not found\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 nvme_auth_dhgroup_name(fallback_dhgid));
		ctrl->dh_gid = fallback_dhgid;
	}
	pr_debug("%s: ctrl %d qid %d: selected DH group %s (%d)\n",
		 __func__, ctrl->cntlid, req->sq->qid,
		 nvme_auth_dhgroup_name(ctrl->dh_gid), ctrl->dh_gid);
	return 0;
}

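/*
 * Handle a DH-HMAC-CHAP_Reply message: derive the DH session key if a
 * DH value was sent, recompute the expected host response and compare
 * it against the one received, and stash the controller challenge (C2)
 * when the host requested bidirectional authentication.
 * Returns 0 on success or a DH-HMAC-CHAP failure reason code.
 */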
static u8 nvmet_auth_reply(struct nvmet_req *req, void *d)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_auth_dhchap_reply_data *data = d;
	u16 dhvlen = le16_to_cpu(data->dhvlen);
	u8 *response;

	pr_debug("%s: ctrl %d qid %d: data hl %d cvalid %d dhvlen %u\n",
		 __func__, ctrl->cntlid, req->sq->qid,
		 data->hl, data->cvalid, dhvlen);

	if (dhvlen) {
		if (!ctrl->dh_tfm)
			return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		if (nvmet_auth_ctrl_sesskey(req, data->rval + 2 * data->hl,
					    dhvlen) < 0)
			return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
	}

	response = kmalloc(data->hl, GFP_KERNEL);
	if (!response)
		return NVME_AUTH_DHCHAP_FAILURE_FAILED;

	if (!ctrl->host_key) {
		pr_warn("ctrl %d qid %d no host key\n",
			ctrl->cntlid, req->sq->qid);
		kfree(response);
		return NVME_AUTH_DHCHAP_FAILURE_FAILED;
	}
	if (nvmet_auth_host_hash(req, response, data->hl) < 0) {
		pr_debug("ctrl %d qid %d host hash failed\n",
			 ctrl->cntlid, req->sq->qid);
		kfree(response);
		return NVME_AUTH_DHCHAP_FAILURE_FAILED;
	}

	if (memcmp(data->rval, response, data->hl)) {
		pr_info("ctrl %d qid %d host response mismatch\n",
			ctrl->cntlid, req->sq->qid);
		kfree(response);
		return NVME_AUTH_DHCHAP_FAILURE_FAILED;
	}
	kfree(response);
	pr_debug("%s: ctrl %d qid %d host authenticated\n",
		 __func__, ctrl->cntlid, req->sq->qid);
	if (data->cvalid) {
		req->sq->dhchap_c2 = kmemdup(data->rval + data->hl, data->hl,
					     GFP_KERNEL);
		if (!req->sq->dhchap_c2)
			return NVME_AUTH_DHCHAP_FAILURE_FAILED;

		pr_debug("%s: ctrl %d qid %d challenge %*ph\n",
			 __func__, ctrl->cntlid, req->sq->qid, data->hl,
			 req->sq->dhchap_c2);
	} else {
		req->sq->authenticated = true;
		req->sq->dhchap_c2 = NULL;
	}
	req->sq->dhchap_s2 = le32_to_cpu(data->seqnum);

	return 0;
}

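/* Extract the reason code explanation from an AUTH_Failure2 message. */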
static u8 nvmet_auth_failure2(void *d)
{
	struct nvmf_auth_dhchap_failure_data *data = d;

	return data->rescode_exp;
}

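/*
 * Handle the Authentication Send fabrics command: validate the command
 * fields, copy in the host payload and dispatch it according to the
 * message type, advancing (or failing) the DH-HMAC-CHAP state machine
 * for this queue.
 */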
void nvmet_execute_auth_send(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_auth_dhchap_success2_data *data;
	void *d;
	u32 tl;
	u16 status = 0;
	u8 dhchap_status;

	if (req->cmd->auth_send.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_send_command, secp);
		goto done;
	}
	if (req->cmd->auth_send.spsp0 != 0x01) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_send_command, spsp0);
		goto done;
	}
	if (req->cmd->auth_send.spsp1 != 0x01) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_send_command, spsp1);
		goto done;
	}
	tl = le32_to_cpu(req->cmd->auth_send.tl);
	if (!tl) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_send_command, tl);
		goto done;
	}
	if (!nvmet_check_transfer_len(req, tl)) {
		pr_debug("%s: transfer length mismatch (%u)\n", __func__, tl);
		return;
	}

	d = kmalloc(tl, GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto done;
	}

	status = nvmet_copy_from_sgl(req, 0, d, tl);
	if (status)
		goto done_kfree;

	data = d;
	pr_debug("%s: ctrl %d qid %d type %d id %d step %x\n", __func__,
		 ctrl->cntlid, req->sq->qid, data->auth_type, data->auth_id,
		 req->sq->dhchap_step);
	if (data->auth_type != NVME_AUTH_COMMON_MESSAGES &&
	    data->auth_type != NVME_AUTH_DHCHAP_MESSAGES)
		goto done_failure1;
	if (data->auth_type == NVME_AUTH_COMMON_MESSAGES) {
		if (data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE) {
			/* Restart negotiation */
			pr_debug("%s: ctrl %d qid %d reset negotiation\n",
				 __func__, ctrl->cntlid, req->sq->qid);
			if (!req->sq->qid) {
				dhchap_status = nvmet_setup_auth(ctrl);
				if (dhchap_status) {
					pr_err("ctrl %d qid 0 failed to setup re-authentication\n",
					       ctrl->cntlid);
					req->sq->dhchap_status = dhchap_status;
					req->sq->dhchap_step =
						NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
					goto done_kfree;
				}
			}
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
		} else if (data->auth_id != req->sq->dhchap_step)
			goto done_failure1;
		/* Validate negotiation parameters */
		dhchap_status = nvmet_auth_negotiate(req, d);
		if (dhchap_status == 0)
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE;
		else {
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
			req->sq->dhchap_status = dhchap_status;
		}
		goto done_kfree;
	}
	if (data->auth_id != req->sq->dhchap_step) {
		pr_debug("%s: ctrl %d qid %d step mismatch (%d != %d)\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 data->auth_id, req->sq->dhchap_step);
		goto done_failure1;
	}
	if (le16_to_cpu(data->t_id) != req->sq->dhchap_tid) {
		pr_debug("%s: ctrl %d qid %d invalid transaction %d (expected %d)\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 le16_to_cpu(data->t_id),
			 req->sq->dhchap_tid);
		req->sq->dhchap_step =
			NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
		req->sq->dhchap_status =
			NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		goto done_kfree;
	}

	switch (data->auth_id) {
	case NVME_AUTH_DHCHAP_MESSAGE_REPLY:
		dhchap_status = nvmet_auth_reply(req, d);
		if (dhchap_status == 0)
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1;
		else {
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
			req->sq->dhchap_status = dhchap_status;
		}
		goto done_kfree;
	case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2:
		req->sq->authenticated = true;
		pr_debug("%s: ctrl %d qid %d ctrl authenticated\n",
			 __func__, ctrl->cntlid, req->sq->qid);
		goto done_kfree;
	case NVME_AUTH_DHCHAP_MESSAGE_FAILURE2:
		dhchap_status = nvmet_auth_failure2(d);
		if (dhchap_status) {
			pr_warn("ctrl %d qid %d: authentication failed (%d)\n",
				ctrl->cntlid, req->sq->qid, dhchap_status);
			req->sq->dhchap_status = dhchap_status;
			req->sq->authenticated = false;
		}
		goto done_kfree;
	default:
		req->sq->dhchap_status =
			NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
		req->sq->dhchap_step =
			NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
		req->sq->authenticated = false;
		goto done_kfree;
	}
done_failure1:
	req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
	req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;

done_kfree:
	kfree(d);
done:
	pr_debug("%s: ctrl %d qid %d dhchap status %x step %x\n", __func__,
		 ctrl->cntlid, req->sq->qid,
		 req->sq->dhchap_status, req->sq->dhchap_step);
	if (status)
		pr_debug("%s: ctrl %d qid %d nvme status %x error loc %d\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 status, req->error_loc);
	if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 &&
	    req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
		unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120;

		mod_delayed_work(system_wq, &req->sq->auth_expired_work,
				 auth_expire_secs * HZ);
		goto complete;
	}
	/* Final states, clear up variables */
	nvmet_auth_sq_free(req->sq);
	if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2)
		nvmet_ctrl_fatal_error(ctrl);

complete:
	nvmet_req_complete(req, status);
}

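/*
 * Build a DH-HMAC-CHAP_Challenge message: pick a sequence number,
 * generate a random challenge of the negotiated hash length and, if a
 * DH group was selected, append the controller's DH public value.
 * Returns 0 on success or a negative errno.
 */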
static int nvmet_auth_challenge(struct nvmet_req *req, void *d, int al)
{
	struct nvmf_auth_dhchap_challenge_data *data = d;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	int ret = 0;
	int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id);
	int data_size = sizeof(*d) + hash_len;

	if (ctrl->dh_tfm)
		data_size += ctrl->dh_keysize;
	if (al < data_size) {
		pr_debug("%s: buffer too small (al %d need %d)\n", __func__,
			 al, data_size);
		return -EINVAL;
	}
	memset(data, 0, data_size);
	req->sq->dhchap_s1 = nvme_auth_get_seqnum();
	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE;
	data->t_id = cpu_to_le16(req->sq->dhchap_tid);
	data->hashid = ctrl->shash_id;
	data->hl = hash_len;
	data->seqnum = cpu_to_le32(req->sq->dhchap_s1);
	req->sq->dhchap_c1 = kmalloc(data->hl, GFP_KERNEL);
	if (!req->sq->dhchap_c1)
		return -ENOMEM;
	get_random_bytes(req->sq->dhchap_c1, data->hl);
	memcpy(data->cval, req->sq->dhchap_c1, data->hl);
	if (ctrl->dh_tfm) {
		data->dhgid = ctrl->dh_gid;
		data->dhvlen = cpu_to_le16(ctrl->dh_keysize);
		ret = nvmet_auth_ctrl_exponential(req, data->cval + data->hl,
						  ctrl->dh_keysize);
	}
	pr_debug("%s: ctrl %d qid %d seq %d transaction %d hl %d dhvlen %zu\n",
		 __func__, ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1,
		 req->sq->dhchap_tid, data->hl, ctrl->dh_keysize);
	return ret;
}

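/*
 * Build a DH-HMAC-CHAP_Success1 message, including the controller
 * response (RVALID set) when the host asked for bidirectional
 * authentication.  Returns 0 or a DH-HMAC-CHAP failure reason code.
 */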
static int nvmet_auth_success1(struct nvmet_req *req, void *d, int al)
{
	struct nvmf_auth_dhchap_success1_data *data = d;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id);

	WARN_ON(al < sizeof(*data));
	memset(data, 0, sizeof(*data));
	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1;
	data->t_id = cpu_to_le16(req->sq->dhchap_tid);
	data->hl = hash_len;
	if (req->sq->dhchap_c2) {
		if (!ctrl->ctrl_key) {
			pr_warn("ctrl %d qid %d no ctrl key\n",
				ctrl->cntlid, req->sq->qid);
			return NVME_AUTH_DHCHAP_FAILURE_FAILED;
		}
		if (nvmet_auth_ctrl_hash(req, data->rval, data->hl))
			return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		data->rvalid = 1;
		pr_debug("ctrl %d qid %d response %*ph\n",
			 ctrl->cntlid, req->sq->qid, data->hl, data->rval);
	}
	return 0;
}

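/* Build an AUTH_Failure1 message carrying the current failure status. */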
static void nvmet_auth_failure1(struct nvmet_req *req, void *d, int al)
{
	struct nvmf_auth_dhchap_failure_data *data = d;

	WARN_ON(al < sizeof(*data));
	data->auth_type = NVME_AUTH_COMMON_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
	data->t_id = cpu_to_le16(req->sq->dhchap_tid);
	data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
	data->rescode_exp = req->sq->dhchap_status;
}

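/*
 * Handle the Authentication Receive fabrics command: validate the
 * command fields and generate the next controller-to-host message
 * (challenge, success1 or failure1) based on the current step of the
 * DH-HMAC-CHAP state machine for this queue.
 */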
void nvmet_execute_auth_receive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	void *d;
	u32 al;
	u16 status = 0;

	if (req->cmd->auth_receive.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_receive_command, secp);
		goto done;
	}
	if (req->cmd->auth_receive.spsp0 != 0x01) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_receive_command, spsp0);
		goto done;
	}
	if (req->cmd->auth_receive.spsp1 != 0x01) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_receive_command, spsp1);
		goto done;
	}
	al = le32_to_cpu(req->cmd->auth_receive.al);
	if (!al) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_receive_command, al);
		goto done;
	}
	if (!nvmet_check_transfer_len(req, al)) {
		pr_debug("%s: transfer length mismatch (%u)\n", __func__, al);
		return;
	}

	d = kmalloc(al, GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto done;
	}
	pr_debug("%s: ctrl %d qid %d step %x\n", __func__,
		 ctrl->cntlid, req->sq->qid, req->sq->dhchap_step);
	switch (req->sq->dhchap_step) {
	case NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE:
		if (nvmet_auth_challenge(req, d, al) < 0) {
			pr_warn("ctrl %d qid %d: challenge error (%d)\n",
				ctrl->cntlid, req->sq->qid, status);
			status = NVME_SC_INTERNAL;
			break;
		}
		req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
		break;
	case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1:
		status = nvmet_auth_success1(req, d, al);
		if (status) {
			req->sq->dhchap_status = status;
			req->sq->authenticated = false;
			nvmet_auth_failure1(req, d, al);
			pr_warn("ctrl %d qid %d: success1 status (%x)\n",
				ctrl->cntlid, req->sq->qid,
				req->sq->dhchap_status);
			break;
		}
		req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
		break;
	case NVME_AUTH_DHCHAP_MESSAGE_FAILURE1:
		req->sq->authenticated = false;
		nvmet_auth_failure1(req, d, al);
		pr_warn("ctrl %d qid %d failure1 (%x)\n",
			ctrl->cntlid, req->sq->qid, req->sq->dhchap_status);
		break;
	default:
		pr_warn("ctrl %d qid %d unhandled step (%d)\n",
			ctrl->cntlid, req->sq->qid, req->sq->dhchap_step);
		req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
		req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		nvmet_auth_failure1(req, d, al);
		status = 0;
		break;
	}

	status = nvmet_copy_to_sgl(req, 0, d, al);
	kfree(d);
done:
	if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2)
		nvmet_auth_sq_free(req->sq);
	else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
		nvmet_auth_sq_free(req->sq);
		nvmet_ctrl_fatal_error(ctrl);
	}
	nvmet_req_complete(req, status);
}