// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics DH-HMAC-CHAP authentication command handling.
 * Copyright (c) 2020 Hannes Reinecke, SUSE Software Solutions.
 * All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/nvme-auth.h>
#include <crypto/hash.h>
#include <crypto/kpp.h>
#include "nvmet.h"

static void nvmet_auth_expired_work(struct work_struct *work)
{
	struct nvmet_sq *sq = container_of(to_delayed_work(work),
			struct nvmet_sq, auth_expired_work);

	pr_debug("%s: ctrl %d qid %d transaction %u expired, resetting\n",
		 __func__, sq->ctrl->cntlid, sq->qid, sq->dhchap_tid);
	sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
	sq->dhchap_tid = -1;
}

void nvmet_auth_sq_init(struct nvmet_sq *sq)
{
	/* Initialize in-band authentication */
	INIT_DELAYED_WORK(&sq->auth_expired_work, nvmet_auth_expired_work);
	sq->authenticated = false;
	sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
}

static u8 nvmet_auth_negotiate(struct nvmet_req *req, void *d)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_auth_dhchap_negotiate_data *data = d;
	int i, hash_id = 0, fallback_hash_id = 0, dhgid, fallback_dhgid;

	pr_debug("%s: ctrl %d qid %d: data sc_c %d napd %d authid %d halen %d dhlen %d\n",
		 __func__, ctrl->cntlid, req->sq->qid,
		 data->sc_c, data->napd, data->auth_protocol[0].dhchap.authid,
		 data->auth_protocol[0].dhchap.halen,
		 data->auth_protocol[0].dhchap.dhlen);
	req->sq->dhchap_tid = le16_to_cpu(data->t_id);
	if (data->sc_c)
		return NVME_AUTH_DHCHAP_FAILURE_CONCAT_MISMATCH;

	if (data->napd != 1)
		return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;

	if (data->auth_protocol[0].dhchap.authid !=
	    NVME_AUTH_DHCHAP_AUTH_ID)
		return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;

	/*
	 * Prefer the controller's configured HMAC if the host offers it,
	 * otherwise remember the first host-proposed hash we can actually
	 * instantiate as a fallback.
	 */
	for (i = 0; i < data->auth_protocol[0].dhchap.halen; i++) {
		u8 host_hmac_id = data->auth_protocol[0].dhchap.idlist[i];

		if (!fallback_hash_id &&
		    crypto_has_shash(nvme_auth_hmac_name(host_hmac_id), 0, 0))
			fallback_hash_id = host_hmac_id;
		if (ctrl->shash_id != host_hmac_id)
			continue;
		hash_id = ctrl->shash_id;
		break;
	}
	if (hash_id == 0) {
		if (fallback_hash_id == 0) {
			pr_debug("%s: ctrl %d qid %d: no usable hash found\n",
				 __func__, ctrl->cntlid, req->sq->qid);
			return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		}
		pr_debug("%s: ctrl %d qid %d: no usable hash found, falling back to %s\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 nvme_auth_hmac_name(fallback_hash_id));
		ctrl->shash_id = fallback_hash_id;
	}

	/*
	 * Same idea for the DH group: select the configured group if the
	 * host proposed it, otherwise fall back to a proposed group with a
	 * usable KPP implementation.
	 */
	dhgid = -1;
	fallback_dhgid = -1;
	for (i = 0; i < data->auth_protocol[0].dhchap.dhlen; i++) {
		int tmp_dhgid = data->auth_protocol[0].dhchap.idlist[i + 30];

		if (tmp_dhgid == ctrl->dh_gid) {
			dhgid = tmp_dhgid;
			break;
		}
		if (fallback_dhgid < 0) {
			const char *kpp = nvme_auth_dhgroup_kpp(tmp_dhgid);

			if (crypto_has_kpp(kpp, 0, 0))
				fallback_dhgid = tmp_dhgid;
		}
	}
	if (dhgid < 0) {
		if (fallback_dhgid < 0) {
			pr_debug("%s: ctrl %d qid %d: no usable DH group found\n",
				 __func__, ctrl->cntlid, req->sq->qid);
			return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
		}
		pr_debug("%s: ctrl %d qid %d: configured DH group %s not found\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 nvme_auth_dhgroup_name(fallback_dhgid));
		ctrl->dh_gid = fallback_dhgid;
	}
	pr_debug("%s: ctrl %d qid %d: selected DH group %s (%d)\n",
		 __func__, ctrl->cntlid, req->sq->qid,
		 nvme_auth_dhgroup_name(ctrl->dh_gid), ctrl->dh_gid);
	return 0;
}

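/*
 * Validate the host's DH-HMAC-CHAP reply: derive the session key from the
 * host's public DH value (if one was sent), recompute the expected response
 * from the host key and compare it against the response in the message.
 * If the host asked for bidirectional authentication (cvalid set), keep the
 * host challenge around for the controller response in the Success1 message.
 */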
static u8 nvmet_auth_reply(struct nvmet_req *req, void *d)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_auth_dhchap_reply_data *data = d;
	u16 dhvlen = le16_to_cpu(data->dhvlen);
	u8 *response;

	pr_debug("%s: ctrl %d qid %d: data hl %d cvalid %d dhvlen %u\n",
		 __func__, ctrl->cntlid, req->sq->qid,
		 data->hl, data->cvalid, dhvlen);

	if (dhvlen) {
		if (!ctrl->dh_tfm)
			return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		if (nvmet_auth_ctrl_sesskey(req, data->rval + 2 * data->hl,
					    dhvlen) < 0)
			return NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
	}

	response = kmalloc(data->hl, GFP_KERNEL);
	if (!response)
		return NVME_AUTH_DHCHAP_FAILURE_FAILED;

	if (!ctrl->host_key) {
		pr_warn("ctrl %d qid %d no host key\n",
			ctrl->cntlid, req->sq->qid);
		kfree(response);
		return NVME_AUTH_DHCHAP_FAILURE_FAILED;
	}
	if (nvmet_auth_host_hash(req, response, data->hl) < 0) {
		pr_debug("ctrl %d qid %d host hash failed\n",
			 ctrl->cntlid, req->sq->qid);
		kfree(response);
		return NVME_AUTH_DHCHAP_FAILURE_FAILED;
	}

	if (memcmp(data->rval, response, data->hl)) {
		pr_info("ctrl %d qid %d host response mismatch\n",
			ctrl->cntlid, req->sq->qid);
		kfree(response);
		return NVME_AUTH_DHCHAP_FAILURE_FAILED;
	}
	kfree(response);
	pr_debug("%s: ctrl %d qid %d host authenticated\n",
		 __func__, ctrl->cntlid, req->sq->qid);
	if (data->cvalid) {
		req->sq->dhchap_c2 = kmemdup(data->rval + data->hl, data->hl,
					     GFP_KERNEL);
		if (!req->sq->dhchap_c2)
			return NVME_AUTH_DHCHAP_FAILURE_FAILED;

		pr_debug("%s: ctrl %d qid %d challenge %*ph\n",
			 __func__, ctrl->cntlid, req->sq->qid, data->hl,
			 req->sq->dhchap_c2);
	} else {
		req->sq->authenticated = true;
		req->sq->dhchap_c2 = NULL;
	}
	req->sq->dhchap_s2 = le32_to_cpu(data->seqnum);

	return 0;
}

static u8 nvmet_auth_failure2(void *d)
{
	struct nvmf_auth_dhchap_failure_data *data = d;

	return data->rescode_exp;
}

u32 nvmet_auth_send_data_len(struct nvmet_req *req)
{
	return le32_to_cpu(req->cmd->auth_send.tl);
}

void nvmet_execute_auth_send(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_auth_dhchap_success2_data *data;
	void *d;
	u32 tl;
	u16 status = 0;
	u8 dhchap_status;

	if (req->cmd->auth_send.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_send_command, secp);
		goto done;
	}
	if (req->cmd->auth_send.spsp0 != 0x01) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_send_command, spsp0);
		goto done;
	}
	if (req->cmd->auth_send.spsp1 != 0x01) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_send_command, spsp1);
		goto done;
	}
	tl = nvmet_auth_send_data_len(req);
	if (!tl) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_send_command, tl);
		goto done;
	}
	if (!nvmet_check_transfer_len(req, tl)) {
		pr_debug("%s: transfer length mismatch (%u)\n", __func__, tl);
		return;
	}

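	/*
	 * Pull the whole AUTH_Send payload into a kernel buffer before
	 * parsing; the message is then dispatched against the current
	 * dhchap_step so out-of-sequence messages fail the transaction.
	 */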
	d = kmalloc(tl, GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto done;
	}

	status = nvmet_copy_from_sgl(req, 0, d, tl);
	if (status)
		goto done_kfree;

	data = d;
	pr_debug("%s: ctrl %d qid %d type %d id %d step %x\n", __func__,
		 ctrl->cntlid, req->sq->qid, data->auth_type, data->auth_id,
		 req->sq->dhchap_step);
	if (data->auth_type != NVME_AUTH_COMMON_MESSAGES &&
	    data->auth_type != NVME_AUTH_DHCHAP_MESSAGES)
		goto done_failure1;
	if (data->auth_type == NVME_AUTH_COMMON_MESSAGES) {
		if (data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE) {
			/* Restart negotiation */
			pr_debug("%s: ctrl %d qid %d reset negotiation\n",
				 __func__, ctrl->cntlid, req->sq->qid);
			if (!req->sq->qid) {
				dhchap_status = nvmet_setup_auth(ctrl);
				if (dhchap_status) {
					pr_err("ctrl %d qid 0 failed to setup re-authentication\n",
					       ctrl->cntlid);
					req->sq->dhchap_status = dhchap_status;
					req->sq->dhchap_step =
						NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
					goto done_kfree;
				}
			}
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
		} else if (data->auth_id != req->sq->dhchap_step)
			goto done_failure1;
		/* Validate negotiation parameters */
		dhchap_status = nvmet_auth_negotiate(req, d);
		if (dhchap_status == 0)
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE;
		else {
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
			req->sq->dhchap_status = dhchap_status;
		}
		goto done_kfree;
	}
	if (data->auth_id != req->sq->dhchap_step) {
		pr_debug("%s: ctrl %d qid %d step mismatch (%d != %d)\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 data->auth_id, req->sq->dhchap_step);
		goto done_failure1;
	}
	if (le16_to_cpu(data->t_id) != req->sq->dhchap_tid) {
		pr_debug("%s: ctrl %d qid %d invalid transaction %d (expected %d)\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 le16_to_cpu(data->t_id),
			 req->sq->dhchap_tid);
		req->sq->dhchap_step =
			NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
		req->sq->dhchap_status =
			NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		goto done_kfree;
	}

	switch (data->auth_id) {
	case NVME_AUTH_DHCHAP_MESSAGE_REPLY:
		dhchap_status = nvmet_auth_reply(req, d);
		if (dhchap_status == 0)
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1;
		else {
			req->sq->dhchap_step =
				NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
			req->sq->dhchap_status = dhchap_status;
		}
		goto done_kfree;
	case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2:
		req->sq->authenticated = true;
		pr_debug("%s: ctrl %d qid %d ctrl authenticated\n",
			 __func__, ctrl->cntlid, req->sq->qid);
		goto done_kfree;
	case NVME_AUTH_DHCHAP_MESSAGE_FAILURE2:
		dhchap_status = nvmet_auth_failure2(d);
		if (dhchap_status) {
			pr_warn("ctrl %d qid %d: authentication failed (%d)\n",
				ctrl->cntlid, req->sq->qid, dhchap_status);
			req->sq->dhchap_status = dhchap_status;
			req->sq->authenticated = false;
		}
		goto done_kfree;
	default:
		req->sq->dhchap_status =
			NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
		req->sq->dhchap_step =
			NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
		req->sq->authenticated = false;
		goto done_kfree;
	}
done_failure1:
	req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
	req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;

done_kfree:
	kfree(d);
done:
	pr_debug("%s: ctrl %d qid %d dhchap status %x step %x\n", __func__,
		 ctrl->cntlid, req->sq->qid,
		 req->sq->dhchap_status, req->sq->dhchap_step);
	if (status)
		pr_debug("%s: ctrl %d qid %d nvme status %x error loc %d\n",
			 __func__, ctrl->cntlid, req->sq->qid,
			 status, req->error_loc);
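	/*
	 * While the transaction is still in flight, (re)arm the expiry timer
	 * so a stalled host gets reset back to negotiation; once a final
	 * state is reached the per-queue authentication data is released
	 * instead.
	 */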
	if (req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2 &&
	    req->sq->dhchap_step != NVME_AUTH_DHCHAP_MESSAGE_FAILURE2) {
		unsigned long auth_expire_secs = ctrl->kato ? ctrl->kato : 120;

		mod_delayed_work(system_wq, &req->sq->auth_expired_work,
				 auth_expire_secs * HZ);
		goto complete;
	}
	/* Final states, clear up variables */
	nvmet_auth_sq_free(req->sq);
	if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE2)
		nvmet_ctrl_fatal_error(ctrl);

complete:
	nvmet_req_complete(req, status);
}

static int nvmet_auth_challenge(struct nvmet_req *req, void *d, int al)
{
	struct nvmf_auth_dhchap_challenge_data *data = d;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	int ret = 0;
	int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id);
	int data_size = sizeof(*d) + hash_len;

	if (ctrl->dh_tfm)
		data_size += ctrl->dh_keysize;
	if (al < data_size) {
		pr_debug("%s: buffer too small (al %d need %d)\n", __func__,
			 al, data_size);
		return -EINVAL;
	}
	memset(data, 0, data_size);
	req->sq->dhchap_s1 = nvme_auth_get_seqnum();
	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE;
	data->t_id = cpu_to_le16(req->sq->dhchap_tid);
	data->hashid = ctrl->shash_id;
	data->hl = hash_len;
	data->seqnum = cpu_to_le32(req->sq->dhchap_s1);
	req->sq->dhchap_c1 = kmalloc(data->hl, GFP_KERNEL);
	if (!req->sq->dhchap_c1)
		return -ENOMEM;
	get_random_bytes(req->sq->dhchap_c1, data->hl);
	memcpy(data->cval, req->sq->dhchap_c1, data->hl);
	if (ctrl->dh_tfm) {
		data->dhgid = ctrl->dh_gid;
		data->dhvlen = cpu_to_le16(ctrl->dh_keysize);
		ret = nvmet_auth_ctrl_exponential(req, data->cval + data->hl,
						  ctrl->dh_keysize);
	}
	pr_debug("%s: ctrl %d qid %d seq %d transaction %d hl %d dhvlen %zu\n",
		 __func__, ctrl->cntlid, req->sq->qid, req->sq->dhchap_s1,
		 req->sq->dhchap_tid, data->hl, ctrl->dh_keysize);
	return ret;
}

static int nvmet_auth_success1(struct nvmet_req *req, void *d, int al)
{
	struct nvmf_auth_dhchap_success1_data *data = d;
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	int hash_len = nvme_auth_hmac_hash_len(ctrl->shash_id);

	WARN_ON(al < sizeof(*data));
	memset(data, 0, sizeof(*data));
	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1;
	data->t_id = cpu_to_le16(req->sq->dhchap_tid);
	data->hl = hash_len;
	if (req->sq->dhchap_c2) {
		if (!ctrl->ctrl_key) {
			pr_warn("ctrl %d qid %d no ctrl key\n",
				ctrl->cntlid, req->sq->qid);
			return NVME_AUTH_DHCHAP_FAILURE_FAILED;
		}
		if (nvmet_auth_ctrl_hash(req, data->rval, data->hl))
			return NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		data->rvalid = 1;
		pr_debug("ctrl %d qid %d response %*ph\n",
			 ctrl->cntlid, req->sq->qid, data->hl, data->rval);
	}
	return 0;
}

static void nvmet_auth_failure1(struct nvmet_req *req, void *d, int al)
{
	struct nvmf_auth_dhchap_failure_data *data = d;

	WARN_ON(al < sizeof(*data));
	data->auth_type = NVME_AUTH_COMMON_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
	data->t_id = cpu_to_le16(req->sq->dhchap_tid);
	data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
	data->rescode_exp = req->sq->dhchap_status;
}

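/*
 * AUTH_Receive path: build the controller-to-host message matching the
 * current dhchap_step (challenge, success1 or failure1) into a scratch
 * buffer and copy it back to the host. Once the queue has reached the
 * success2 or failure1 step the per-queue authentication state is freed;
 * failure1 additionally raises a controller fatal error.
 */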
u32 nvmet_auth_receive_data_len(struct nvmet_req *req)
{
	return le32_to_cpu(req->cmd->auth_receive.al);
}

void nvmet_execute_auth_receive(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	void *d;
	u32 al;
	u16 status = 0;

	if (req->cmd->auth_receive.secp != NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_receive_command, secp);
		goto done;
	}
	if (req->cmd->auth_receive.spsp0 != 0x01) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_receive_command, spsp0);
		goto done;
	}
	if (req->cmd->auth_receive.spsp1 != 0x01) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_receive_command, spsp1);
		goto done;
	}
	al = nvmet_auth_receive_data_len(req);
	if (!al) {
		status = NVME_SC_INVALID_FIELD | NVME_STATUS_DNR;
		req->error_loc =
			offsetof(struct nvmf_auth_receive_command, al);
		goto done;
	}
	if (!nvmet_check_transfer_len(req, al)) {
		pr_debug("%s: transfer length mismatch (%u)\n", __func__, al);
		return;
	}

	d = kmalloc(al, GFP_KERNEL);
	if (!d) {
		status = NVME_SC_INTERNAL;
		goto done;
	}
	pr_debug("%s: ctrl %d qid %d step %x\n", __func__,
		 ctrl->cntlid, req->sq->qid, req->sq->dhchap_step);
	switch (req->sq->dhchap_step) {
	case NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE:
		if (nvmet_auth_challenge(req, d, al) < 0) {
			pr_warn("ctrl %d qid %d: challenge error (%d)\n",
				ctrl->cntlid, req->sq->qid, status);
			status = NVME_SC_INTERNAL;
			break;
		}
		req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
		break;
	case NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1:
		status = nvmet_auth_success1(req, d, al);
		if (status) {
			req->sq->dhchap_status = status;
			req->sq->authenticated = false;
			nvmet_auth_failure1(req, d, al);
			pr_warn("ctrl %d qid %d: success1 status (%x)\n",
				ctrl->cntlid, req->sq->qid,
				req->sq->dhchap_status);
			break;
		}
		req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
		break;
	case NVME_AUTH_DHCHAP_MESSAGE_FAILURE1:
		req->sq->authenticated = false;
		nvmet_auth_failure1(req, d, al);
		pr_warn("ctrl %d qid %d failure1 (%x)\n",
			ctrl->cntlid, req->sq->qid, req->sq->dhchap_status);
		break;
	default:
		pr_warn("ctrl %d qid %d unhandled step (%d)\n",
			ctrl->cntlid, req->sq->qid, req->sq->dhchap_step);
		req->sq->dhchap_step = NVME_AUTH_DHCHAP_MESSAGE_FAILURE1;
		req->sq->dhchap_status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		nvmet_auth_failure1(req, d, al);
		status = 0;
		break;
	}

	status = nvmet_copy_to_sgl(req, 0, d, al);
	kfree(d);
done:
	if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2)
		nvmet_auth_sq_free(req->sq);
	else if (req->sq->dhchap_step == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
		nvmet_auth_sq_free(req->sq);
		nvmet_ctrl_fatal_error(ctrl);
	}
	nvmet_req_complete(req, status);
}