1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2020 Hannes Reinecke, SUSE Linux 4 */ 5 6 #include <linux/crc32.h> 7 #include <linux/base64.h> 8 #include <linux/prandom.h> 9 #include <asm/unaligned.h> 10 #include <crypto/hash.h> 11 #include <crypto/dh.h> 12 #include "nvme.h" 13 #include "fabrics.h" 14 #include <linux/nvme-auth.h> 15 16 #define CHAP_BUF_SIZE 4096 17 static struct kmem_cache *nvme_chap_buf_cache; 18 static mempool_t *nvme_chap_buf_pool; 19 20 struct nvme_dhchap_queue_context { 21 struct list_head entry; 22 struct work_struct auth_work; 23 struct nvme_ctrl *ctrl; 24 struct crypto_shash *shash_tfm; 25 struct crypto_kpp *dh_tfm; 26 void *buf; 27 int qid; 28 int error; 29 u32 s1; 30 u32 s2; 31 u16 transaction; 32 u8 status; 33 u8 hash_id; 34 size_t hash_len; 35 u8 dhgroup_id; 36 u8 c1[64]; 37 u8 c2[64]; 38 u8 response[64]; 39 u8 *host_response; 40 u8 *ctrl_key; 41 int ctrl_key_len; 42 u8 *host_key; 43 int host_key_len; 44 u8 *sess_key; 45 int sess_key_len; 46 }; 47 48 #define nvme_auth_flags_from_qid(qid) \ 49 (qid == 0) ? 0 : BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED 50 #define nvme_auth_queue_from_qid(ctrl, qid) \ 51 (qid == 0) ? (ctrl)->fabrics_q : (ctrl)->connect_q 52 53 static inline int ctrl_max_dhchaps(struct nvme_ctrl *ctrl) 54 { 55 return ctrl->opts->nr_io_queues + ctrl->opts->nr_write_queues + 56 ctrl->opts->nr_poll_queues + 1; 57 } 58 59 static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid, 60 void *data, size_t data_len, bool auth_send) 61 { 62 struct nvme_command cmd = {}; 63 blk_mq_req_flags_t flags = nvme_auth_flags_from_qid(qid); 64 struct request_queue *q = nvme_auth_queue_from_qid(ctrl, qid); 65 int ret; 66 67 cmd.auth_common.opcode = nvme_fabrics_command; 68 cmd.auth_common.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER; 69 cmd.auth_common.spsp0 = 0x01; 70 cmd.auth_common.spsp1 = 0x01; 71 if (auth_send) { 72 cmd.auth_send.fctype = nvme_fabrics_type_auth_send; 73 cmd.auth_send.tl = cpu_to_le32(data_len); 74 } else { 75 cmd.auth_receive.fctype = nvme_fabrics_type_auth_receive; 76 cmd.auth_receive.al = cpu_to_le32(data_len); 77 } 78 79 ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, data_len, 80 qid == 0 ? NVME_QID_ANY : qid, 81 0, flags); 82 if (ret > 0) 83 dev_warn(ctrl->device, 84 "qid %d auth_send failed with status %d\n", qid, ret); 85 else if (ret < 0) 86 dev_err(ctrl->device, 87 "qid %d auth_send failed with error %d\n", qid, ret); 88 return ret; 89 } 90 91 static int nvme_auth_receive_validate(struct nvme_ctrl *ctrl, int qid, 92 struct nvmf_auth_dhchap_failure_data *data, 93 u16 transaction, u8 expected_msg) 94 { 95 dev_dbg(ctrl->device, "%s: qid %d auth_type %d auth_id %x\n", 96 __func__, qid, data->auth_type, data->auth_id); 97 98 if (data->auth_type == NVME_AUTH_COMMON_MESSAGES && 99 data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) { 100 return data->rescode_exp; 101 } 102 if (data->auth_type != NVME_AUTH_DHCHAP_MESSAGES || 103 data->auth_id != expected_msg) { 104 dev_warn(ctrl->device, 105 "qid %d invalid message %02x/%02x\n", 106 qid, data->auth_type, data->auth_id); 107 return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE; 108 } 109 if (le16_to_cpu(data->t_id) != transaction) { 110 dev_warn(ctrl->device, 111 "qid %d invalid transaction ID %d\n", 112 qid, le16_to_cpu(data->t_id)); 113 return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE; 114 } 115 return 0; 116 } 117 118 static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl, 119 struct nvme_dhchap_queue_context *chap) 120 { 121 struct nvmf_auth_dhchap_negotiate_data *data = chap->buf; 122 size_t size = sizeof(*data) + sizeof(union nvmf_auth_protocol); 123 124 if (size > CHAP_BUF_SIZE) { 125 chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; 126 return -EINVAL; 127 } 128 memset((u8 *)chap->buf, 0, size); 129 data->auth_type = NVME_AUTH_COMMON_MESSAGES; 130 data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE; 131 data->t_id = cpu_to_le16(chap->transaction); 132 data->sc_c = 0; /* No secure channel concatenation */ 133 data->napd = 1; 134 data->auth_protocol[0].dhchap.authid = NVME_AUTH_DHCHAP_AUTH_ID; 135 data->auth_protocol[0].dhchap.halen = 3; 136 data->auth_protocol[0].dhchap.dhlen = 6; 137 data->auth_protocol[0].dhchap.idlist[0] = NVME_AUTH_HASH_SHA256; 138 data->auth_protocol[0].dhchap.idlist[1] = NVME_AUTH_HASH_SHA384; 139 data->auth_protocol[0].dhchap.idlist[2] = NVME_AUTH_HASH_SHA512; 140 data->auth_protocol[0].dhchap.idlist[30] = NVME_AUTH_DHGROUP_NULL; 141 data->auth_protocol[0].dhchap.idlist[31] = NVME_AUTH_DHGROUP_2048; 142 data->auth_protocol[0].dhchap.idlist[32] = NVME_AUTH_DHGROUP_3072; 143 data->auth_protocol[0].dhchap.idlist[33] = NVME_AUTH_DHGROUP_4096; 144 data->auth_protocol[0].dhchap.idlist[34] = NVME_AUTH_DHGROUP_6144; 145 data->auth_protocol[0].dhchap.idlist[35] = NVME_AUTH_DHGROUP_8192; 146 147 return size; 148 } 149 150 static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl, 151 struct nvme_dhchap_queue_context *chap) 152 { 153 struct nvmf_auth_dhchap_challenge_data *data = chap->buf; 154 u16 dhvlen = le16_to_cpu(data->dhvlen); 155 size_t size = sizeof(*data) + data->hl + dhvlen; 156 const char *gid_name = nvme_auth_dhgroup_name(data->dhgid); 157 const char *hmac_name, *kpp_name; 158 159 if (size > CHAP_BUF_SIZE) { 160 chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; 161 return NVME_SC_INVALID_FIELD; 162 } 163 164 hmac_name = nvme_auth_hmac_name(data->hashid); 165 if (!hmac_name) { 166 dev_warn(ctrl->device, 167 "qid %d: invalid HASH ID %d\n", 168 chap->qid, data->hashid); 169 chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE; 170 return NVME_SC_INVALID_FIELD; 171 } 172 173 if (chap->hash_id == data->hashid && chap->shash_tfm && 174 !strcmp(crypto_shash_alg_name(chap->shash_tfm), hmac_name) && 175 crypto_shash_digestsize(chap->shash_tfm) == data->hl) { 176 dev_dbg(ctrl->device, 177 "qid %d: reuse existing hash %s\n", 178 chap->qid, hmac_name); 179 goto select_kpp; 180 } 181 182 /* Reset if hash cannot be reused */ 183 if (chap->shash_tfm) { 184 crypto_free_shash(chap->shash_tfm); 185 chap->hash_id = 0; 186 chap->hash_len = 0; 187 } 188 chap->shash_tfm = crypto_alloc_shash(hmac_name, 0, 189 CRYPTO_ALG_ALLOCATES_MEMORY); 190 if (IS_ERR(chap->shash_tfm)) { 191 dev_warn(ctrl->device, 192 "qid %d: failed to allocate hash %s, error %ld\n", 193 chap->qid, hmac_name, PTR_ERR(chap->shash_tfm)); 194 chap->shash_tfm = NULL; 195 chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED; 196 return NVME_SC_AUTH_REQUIRED; 197 } 198 199 if (crypto_shash_digestsize(chap->shash_tfm) != data->hl) { 200 dev_warn(ctrl->device, 201 "qid %d: invalid hash length %d\n", 202 chap->qid, data->hl); 203 crypto_free_shash(chap->shash_tfm); 204 chap->shash_tfm = NULL; 205 chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE; 206 return NVME_SC_AUTH_REQUIRED; 207 } 208 209 chap->hash_id = data->hashid; 210 chap->hash_len = data->hl; 211 dev_dbg(ctrl->device, "qid %d: selected hash %s\n", 212 chap->qid, hmac_name); 213 214 select_kpp: 215 kpp_name = nvme_auth_dhgroup_kpp(data->dhgid); 216 if (!kpp_name) { 217 dev_warn(ctrl->device, 218 "qid %d: invalid DH group id %d\n", 219 chap->qid, data->dhgid); 220 chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE; 221 /* Leave previous dh_tfm intact */ 222 return NVME_SC_AUTH_REQUIRED; 223 } 224 225 if (chap->dhgroup_id == data->dhgid && 226 (data->dhgid == NVME_AUTH_DHGROUP_NULL || chap->dh_tfm)) { 227 dev_dbg(ctrl->device, 228 "qid %d: reuse existing DH group %s\n", 229 chap->qid, gid_name); 230 goto skip_kpp; 231 } 232 233 /* Reset dh_tfm if it can't be reused */ 234 if (chap->dh_tfm) { 235 crypto_free_kpp(chap->dh_tfm); 236 chap->dh_tfm = NULL; 237 } 238 239 if (data->dhgid != NVME_AUTH_DHGROUP_NULL) { 240 if (dhvlen == 0) { 241 dev_warn(ctrl->device, 242 "qid %d: empty DH value\n", 243 chap->qid); 244 chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE; 245 return NVME_SC_INVALID_FIELD; 246 } 247 248 chap->dh_tfm = crypto_alloc_kpp(kpp_name, 0, 0); 249 if (IS_ERR(chap->dh_tfm)) { 250 int ret = PTR_ERR(chap->dh_tfm); 251 252 dev_warn(ctrl->device, 253 "qid %d: error %d initializing DH group %s\n", 254 chap->qid, ret, gid_name); 255 chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE; 256 chap->dh_tfm = NULL; 257 return NVME_SC_AUTH_REQUIRED; 258 } 259 dev_dbg(ctrl->device, "qid %d: selected DH group %s\n", 260 chap->qid, gid_name); 261 } else if (dhvlen != 0) { 262 dev_warn(ctrl->device, 263 "qid %d: invalid DH value for NULL DH\n", 264 chap->qid); 265 chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; 266 return NVME_SC_INVALID_FIELD; 267 } 268 chap->dhgroup_id = data->dhgid; 269 270 skip_kpp: 271 chap->s1 = le32_to_cpu(data->seqnum); 272 memcpy(chap->c1, data->cval, chap->hash_len); 273 if (dhvlen) { 274 chap->ctrl_key = kmalloc(dhvlen, GFP_KERNEL); 275 if (!chap->ctrl_key) { 276 chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED; 277 return NVME_SC_AUTH_REQUIRED; 278 } 279 chap->ctrl_key_len = dhvlen; 280 memcpy(chap->ctrl_key, data->cval + chap->hash_len, 281 dhvlen); 282 dev_dbg(ctrl->device, "ctrl public key %*ph\n", 283 (int)chap->ctrl_key_len, chap->ctrl_key); 284 } 285 286 return 0; 287 } 288 289 static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl, 290 struct nvme_dhchap_queue_context *chap) 291 { 292 struct nvmf_auth_dhchap_reply_data *data = chap->buf; 293 size_t size = sizeof(*data); 294 295 size += 2 * chap->hash_len; 296 297 if (chap->host_key_len) 298 size += chap->host_key_len; 299 300 if (size > CHAP_BUF_SIZE) { 301 chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; 302 return -EINVAL; 303 } 304 305 memset(chap->buf, 0, size); 306 data->auth_type = NVME_AUTH_DHCHAP_MESSAGES; 307 data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_REPLY; 308 data->t_id = cpu_to_le16(chap->transaction); 309 data->hl = chap->hash_len; 310 data->dhvlen = cpu_to_le16(chap->host_key_len); 311 memcpy(data->rval, chap->response, chap->hash_len); 312 if (ctrl->ctrl_key) { 313 get_random_bytes(chap->c2, chap->hash_len); 314 data->cvalid = 1; 315 chap->s2 = nvme_auth_get_seqnum(); 316 memcpy(data->rval + chap->hash_len, chap->c2, 317 chap->hash_len); 318 dev_dbg(ctrl->device, "%s: qid %d ctrl challenge %*ph\n", 319 __func__, chap->qid, (int)chap->hash_len, chap->c2); 320 } else { 321 memset(chap->c2, 0, chap->hash_len); 322 chap->s2 = 0; 323 } 324 data->seqnum = cpu_to_le32(chap->s2); 325 if (chap->host_key_len) { 326 dev_dbg(ctrl->device, "%s: qid %d host public key %*ph\n", 327 __func__, chap->qid, 328 chap->host_key_len, chap->host_key); 329 memcpy(data->rval + 2 * chap->hash_len, chap->host_key, 330 chap->host_key_len); 331 } 332 333 return size; 334 } 335 336 static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl, 337 struct nvme_dhchap_queue_context *chap) 338 { 339 struct nvmf_auth_dhchap_success1_data *data = chap->buf; 340 size_t size = sizeof(*data); 341 342 if (chap->ctrl_key) 343 size += chap->hash_len; 344 345 if (size > CHAP_BUF_SIZE) { 346 chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; 347 return NVME_SC_INVALID_FIELD; 348 } 349 350 if (data->hl != chap->hash_len) { 351 dev_warn(ctrl->device, 352 "qid %d: invalid hash length %u\n", 353 chap->qid, data->hl); 354 chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE; 355 return NVME_SC_INVALID_FIELD; 356 } 357 358 /* Just print out information for the admin queue */ 359 if (chap->qid == 0) 360 dev_info(ctrl->device, 361 "qid 0: authenticated with hash %s dhgroup %s\n", 362 nvme_auth_hmac_name(chap->hash_id), 363 nvme_auth_dhgroup_name(chap->dhgroup_id)); 364 365 if (!data->rvalid) 366 return 0; 367 368 /* Validate controller response */ 369 if (memcmp(chap->response, data->rval, data->hl)) { 370 dev_dbg(ctrl->device, "%s: qid %d ctrl response %*ph\n", 371 __func__, chap->qid, (int)chap->hash_len, data->rval); 372 dev_dbg(ctrl->device, "%s: qid %d host response %*ph\n", 373 __func__, chap->qid, (int)chap->hash_len, 374 chap->response); 375 dev_warn(ctrl->device, 376 "qid %d: controller authentication failed\n", 377 chap->qid); 378 chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED; 379 return NVME_SC_AUTH_REQUIRED; 380 } 381 382 /* Just print out information for the admin queue */ 383 if (chap->qid == 0) 384 dev_info(ctrl->device, 385 "qid 0: controller authenticated\n"); 386 return 0; 387 } 388 389 static int nvme_auth_set_dhchap_success2_data(struct nvme_ctrl *ctrl, 390 struct nvme_dhchap_queue_context *chap) 391 { 392 struct nvmf_auth_dhchap_success2_data *data = chap->buf; 393 size_t size = sizeof(*data); 394 395 memset(chap->buf, 0, size); 396 data->auth_type = NVME_AUTH_DHCHAP_MESSAGES; 397 data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2; 398 data->t_id = cpu_to_le16(chap->transaction); 399 400 return size; 401 } 402 403 static int nvme_auth_set_dhchap_failure2_data(struct nvme_ctrl *ctrl, 404 struct nvme_dhchap_queue_context *chap) 405 { 406 struct nvmf_auth_dhchap_failure_data *data = chap->buf; 407 size_t size = sizeof(*data); 408 409 memset(chap->buf, 0, size); 410 data->auth_type = NVME_AUTH_COMMON_MESSAGES; 411 data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2; 412 data->t_id = cpu_to_le16(chap->transaction); 413 data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED; 414 data->rescode_exp = chap->status; 415 416 return size; 417 } 418 419 static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl, 420 struct nvme_dhchap_queue_context *chap) 421 { 422 SHASH_DESC_ON_STACK(shash, chap->shash_tfm); 423 u8 buf[4], *challenge = chap->c1; 424 int ret; 425 426 dev_dbg(ctrl->device, "%s: qid %d host response seq %u transaction %d\n", 427 __func__, chap->qid, chap->s1, chap->transaction); 428 429 if (!chap->host_response) { 430 chap->host_response = nvme_auth_transform_key(ctrl->host_key, 431 ctrl->opts->host->nqn); 432 if (IS_ERR(chap->host_response)) { 433 ret = PTR_ERR(chap->host_response); 434 chap->host_response = NULL; 435 return ret; 436 } 437 } else { 438 dev_dbg(ctrl->device, "%s: qid %d re-using host response\n", 439 __func__, chap->qid); 440 } 441 442 ret = crypto_shash_setkey(chap->shash_tfm, 443 chap->host_response, ctrl->host_key->len); 444 if (ret) { 445 dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n", 446 chap->qid, ret); 447 goto out; 448 } 449 450 if (chap->dh_tfm) { 451 challenge = kmalloc(chap->hash_len, GFP_KERNEL); 452 if (!challenge) { 453 ret = -ENOMEM; 454 goto out; 455 } 456 ret = nvme_auth_augmented_challenge(chap->hash_id, 457 chap->sess_key, 458 chap->sess_key_len, 459 chap->c1, challenge, 460 chap->hash_len); 461 if (ret) 462 goto out; 463 } 464 465 shash->tfm = chap->shash_tfm; 466 ret = crypto_shash_init(shash); 467 if (ret) 468 goto out; 469 ret = crypto_shash_update(shash, challenge, chap->hash_len); 470 if (ret) 471 goto out; 472 put_unaligned_le32(chap->s1, buf); 473 ret = crypto_shash_update(shash, buf, 4); 474 if (ret) 475 goto out; 476 put_unaligned_le16(chap->transaction, buf); 477 ret = crypto_shash_update(shash, buf, 2); 478 if (ret) 479 goto out; 480 memset(buf, 0, sizeof(buf)); 481 ret = crypto_shash_update(shash, buf, 1); 482 if (ret) 483 goto out; 484 ret = crypto_shash_update(shash, "HostHost", 8); 485 if (ret) 486 goto out; 487 ret = crypto_shash_update(shash, ctrl->opts->host->nqn, 488 strlen(ctrl->opts->host->nqn)); 489 if (ret) 490 goto out; 491 ret = crypto_shash_update(shash, buf, 1); 492 if (ret) 493 goto out; 494 ret = crypto_shash_update(shash, ctrl->opts->subsysnqn, 495 strlen(ctrl->opts->subsysnqn)); 496 if (ret) 497 goto out; 498 ret = crypto_shash_final(shash, chap->response); 499 out: 500 if (challenge != chap->c1) 501 kfree(challenge); 502 return ret; 503 } 504 505 static int nvme_auth_dhchap_setup_ctrl_response(struct nvme_ctrl *ctrl, 506 struct nvme_dhchap_queue_context *chap) 507 { 508 SHASH_DESC_ON_STACK(shash, chap->shash_tfm); 509 u8 *ctrl_response; 510 u8 buf[4], *challenge = chap->c2; 511 int ret; 512 513 ctrl_response = nvme_auth_transform_key(ctrl->ctrl_key, 514 ctrl->opts->subsysnqn); 515 if (IS_ERR(ctrl_response)) { 516 ret = PTR_ERR(ctrl_response); 517 return ret; 518 } 519 520 ret = crypto_shash_setkey(chap->shash_tfm, 521 ctrl_response, ctrl->ctrl_key->len); 522 if (ret) { 523 dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n", 524 chap->qid, ret); 525 goto out; 526 } 527 528 if (chap->dh_tfm) { 529 challenge = kmalloc(chap->hash_len, GFP_KERNEL); 530 if (!challenge) { 531 ret = -ENOMEM; 532 goto out; 533 } 534 ret = nvme_auth_augmented_challenge(chap->hash_id, 535 chap->sess_key, 536 chap->sess_key_len, 537 chap->c2, challenge, 538 chap->hash_len); 539 if (ret) 540 goto out; 541 } 542 dev_dbg(ctrl->device, "%s: qid %d ctrl response seq %u transaction %d\n", 543 __func__, chap->qid, chap->s2, chap->transaction); 544 dev_dbg(ctrl->device, "%s: qid %d challenge %*ph\n", 545 __func__, chap->qid, (int)chap->hash_len, challenge); 546 dev_dbg(ctrl->device, "%s: qid %d subsysnqn %s\n", 547 __func__, chap->qid, ctrl->opts->subsysnqn); 548 dev_dbg(ctrl->device, "%s: qid %d hostnqn %s\n", 549 __func__, chap->qid, ctrl->opts->host->nqn); 550 shash->tfm = chap->shash_tfm; 551 ret = crypto_shash_init(shash); 552 if (ret) 553 goto out; 554 ret = crypto_shash_update(shash, challenge, chap->hash_len); 555 if (ret) 556 goto out; 557 put_unaligned_le32(chap->s2, buf); 558 ret = crypto_shash_update(shash, buf, 4); 559 if (ret) 560 goto out; 561 put_unaligned_le16(chap->transaction, buf); 562 ret = crypto_shash_update(shash, buf, 2); 563 if (ret) 564 goto out; 565 memset(buf, 0, 4); 566 ret = crypto_shash_update(shash, buf, 1); 567 if (ret) 568 goto out; 569 ret = crypto_shash_update(shash, "Controller", 10); 570 if (ret) 571 goto out; 572 ret = crypto_shash_update(shash, ctrl->opts->subsysnqn, 573 strlen(ctrl->opts->subsysnqn)); 574 if (ret) 575 goto out; 576 ret = crypto_shash_update(shash, buf, 1); 577 if (ret) 578 goto out; 579 ret = crypto_shash_update(shash, ctrl->opts->host->nqn, 580 strlen(ctrl->opts->host->nqn)); 581 if (ret) 582 goto out; 583 ret = crypto_shash_final(shash, chap->response); 584 out: 585 if (challenge != chap->c2) 586 kfree(challenge); 587 kfree(ctrl_response); 588 return ret; 589 } 590 591 static int nvme_auth_dhchap_exponential(struct nvme_ctrl *ctrl, 592 struct nvme_dhchap_queue_context *chap) 593 { 594 int ret; 595 596 if (chap->host_key && chap->host_key_len) { 597 dev_dbg(ctrl->device, 598 "qid %d: reusing host key\n", chap->qid); 599 goto gen_sesskey; 600 } 601 ret = nvme_auth_gen_privkey(chap->dh_tfm, chap->dhgroup_id); 602 if (ret < 0) { 603 chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; 604 return ret; 605 } 606 607 chap->host_key_len = crypto_kpp_maxsize(chap->dh_tfm); 608 609 chap->host_key = kzalloc(chap->host_key_len, GFP_KERNEL); 610 if (!chap->host_key) { 611 chap->host_key_len = 0; 612 chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED; 613 return -ENOMEM; 614 } 615 ret = nvme_auth_gen_pubkey(chap->dh_tfm, 616 chap->host_key, chap->host_key_len); 617 if (ret) { 618 dev_dbg(ctrl->device, 619 "failed to generate public key, error %d\n", ret); 620 chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; 621 return ret; 622 } 623 624 gen_sesskey: 625 chap->sess_key_len = chap->host_key_len; 626 chap->sess_key = kmalloc(chap->sess_key_len, GFP_KERNEL); 627 if (!chap->sess_key) { 628 chap->sess_key_len = 0; 629 chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED; 630 return -ENOMEM; 631 } 632 633 ret = nvme_auth_gen_shared_secret(chap->dh_tfm, 634 chap->ctrl_key, chap->ctrl_key_len, 635 chap->sess_key, chap->sess_key_len); 636 if (ret) { 637 dev_dbg(ctrl->device, 638 "failed to generate shared secret, error %d\n", ret); 639 chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD; 640 return ret; 641 } 642 dev_dbg(ctrl->device, "shared secret %*ph\n", 643 (int)chap->sess_key_len, chap->sess_key); 644 return 0; 645 } 646 647 static void nvme_auth_reset_dhchap(struct nvme_dhchap_queue_context *chap) 648 { 649 kfree_sensitive(chap->host_response); 650 chap->host_response = NULL; 651 kfree_sensitive(chap->host_key); 652 chap->host_key = NULL; 653 chap->host_key_len = 0; 654 kfree_sensitive(chap->ctrl_key); 655 chap->ctrl_key = NULL; 656 chap->ctrl_key_len = 0; 657 kfree_sensitive(chap->sess_key); 658 chap->sess_key = NULL; 659 chap->sess_key_len = 0; 660 chap->status = 0; 661 chap->error = 0; 662 chap->s1 = 0; 663 chap->s2 = 0; 664 chap->transaction = 0; 665 memset(chap->c1, 0, sizeof(chap->c1)); 666 memset(chap->c2, 0, sizeof(chap->c2)); 667 mempool_free(chap->buf, nvme_chap_buf_pool); 668 chap->buf = NULL; 669 } 670 671 static void nvme_auth_free_dhchap(struct nvme_dhchap_queue_context *chap) 672 { 673 nvme_auth_reset_dhchap(chap); 674 if (chap->shash_tfm) 675 crypto_free_shash(chap->shash_tfm); 676 if (chap->dh_tfm) 677 crypto_free_kpp(chap->dh_tfm); 678 } 679 680 static void nvme_queue_auth_work(struct work_struct *work) 681 { 682 struct nvme_dhchap_queue_context *chap = 683 container_of(work, struct nvme_dhchap_queue_context, auth_work); 684 struct nvme_ctrl *ctrl = chap->ctrl; 685 size_t tl; 686 int ret = 0; 687 688 /* 689 * Allocate a large enough buffer for the entire negotiation: 690 * 4k is enough to ffdhe8192. 691 */ 692 chap->buf = mempool_alloc(nvme_chap_buf_pool, GFP_KERNEL); 693 if (!chap->buf) { 694 chap->error = -ENOMEM; 695 return; 696 } 697 698 chap->transaction = ctrl->transaction++; 699 700 /* DH-HMAC-CHAP Step 1: send negotiate */ 701 dev_dbg(ctrl->device, "%s: qid %d send negotiate\n", 702 __func__, chap->qid); 703 ret = nvme_auth_set_dhchap_negotiate_data(ctrl, chap); 704 if (ret < 0) { 705 chap->error = ret; 706 return; 707 } 708 tl = ret; 709 ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true); 710 if (ret) { 711 chap->error = ret; 712 return; 713 } 714 715 /* DH-HMAC-CHAP Step 2: receive challenge */ 716 dev_dbg(ctrl->device, "%s: qid %d receive challenge\n", 717 __func__, chap->qid); 718 719 memset(chap->buf, 0, CHAP_BUF_SIZE); 720 ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, CHAP_BUF_SIZE, 721 false); 722 if (ret) { 723 dev_warn(ctrl->device, 724 "qid %d failed to receive challenge, %s %d\n", 725 chap->qid, ret < 0 ? "error" : "nvme status", ret); 726 chap->error = ret; 727 return; 728 } 729 ret = nvme_auth_receive_validate(ctrl, chap->qid, chap->buf, chap->transaction, 730 NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE); 731 if (ret) { 732 chap->status = ret; 733 chap->error = NVME_SC_AUTH_REQUIRED; 734 return; 735 } 736 737 ret = nvme_auth_process_dhchap_challenge(ctrl, chap); 738 if (ret) { 739 /* Invalid challenge parameters */ 740 chap->error = ret; 741 goto fail2; 742 } 743 744 if (chap->ctrl_key_len) { 745 dev_dbg(ctrl->device, 746 "%s: qid %d DH exponential\n", 747 __func__, chap->qid); 748 ret = nvme_auth_dhchap_exponential(ctrl, chap); 749 if (ret) { 750 chap->error = ret; 751 goto fail2; 752 } 753 } 754 755 dev_dbg(ctrl->device, "%s: qid %d host response\n", 756 __func__, chap->qid); 757 mutex_lock(&ctrl->dhchap_auth_mutex); 758 ret = nvme_auth_dhchap_setup_host_response(ctrl, chap); 759 if (ret) { 760 mutex_unlock(&ctrl->dhchap_auth_mutex); 761 chap->error = ret; 762 goto fail2; 763 } 764 mutex_unlock(&ctrl->dhchap_auth_mutex); 765 766 /* DH-HMAC-CHAP Step 3: send reply */ 767 dev_dbg(ctrl->device, "%s: qid %d send reply\n", 768 __func__, chap->qid); 769 ret = nvme_auth_set_dhchap_reply_data(ctrl, chap); 770 if (ret < 0) { 771 chap->error = ret; 772 goto fail2; 773 } 774 775 tl = ret; 776 ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true); 777 if (ret) { 778 chap->error = ret; 779 goto fail2; 780 } 781 782 /* DH-HMAC-CHAP Step 4: receive success1 */ 783 dev_dbg(ctrl->device, "%s: qid %d receive success1\n", 784 __func__, chap->qid); 785 786 memset(chap->buf, 0, CHAP_BUF_SIZE); 787 ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, CHAP_BUF_SIZE, 788 false); 789 if (ret) { 790 dev_warn(ctrl->device, 791 "qid %d failed to receive success1, %s %d\n", 792 chap->qid, ret < 0 ? "error" : "nvme status", ret); 793 chap->error = ret; 794 return; 795 } 796 ret = nvme_auth_receive_validate(ctrl, chap->qid, 797 chap->buf, chap->transaction, 798 NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1); 799 if (ret) { 800 chap->status = ret; 801 chap->error = NVME_SC_AUTH_REQUIRED; 802 return; 803 } 804 805 mutex_lock(&ctrl->dhchap_auth_mutex); 806 if (ctrl->ctrl_key) { 807 dev_dbg(ctrl->device, 808 "%s: qid %d controller response\n", 809 __func__, chap->qid); 810 ret = nvme_auth_dhchap_setup_ctrl_response(ctrl, chap); 811 if (ret) { 812 mutex_unlock(&ctrl->dhchap_auth_mutex); 813 chap->error = ret; 814 goto fail2; 815 } 816 } 817 mutex_unlock(&ctrl->dhchap_auth_mutex); 818 819 ret = nvme_auth_process_dhchap_success1(ctrl, chap); 820 if (ret) { 821 /* Controller authentication failed */ 822 chap->error = NVME_SC_AUTH_REQUIRED; 823 goto fail2; 824 } 825 826 if (chap->ctrl_key) { 827 /* DH-HMAC-CHAP Step 5: send success2 */ 828 dev_dbg(ctrl->device, "%s: qid %d send success2\n", 829 __func__, chap->qid); 830 tl = nvme_auth_set_dhchap_success2_data(ctrl, chap); 831 ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true); 832 if (ret) 833 chap->error = ret; 834 } 835 if (!ret) { 836 chap->error = 0; 837 return; 838 } 839 840 fail2: 841 dev_dbg(ctrl->device, "%s: qid %d send failure2, status %x\n", 842 __func__, chap->qid, chap->status); 843 tl = nvme_auth_set_dhchap_failure2_data(ctrl, chap); 844 ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true); 845 /* 846 * only update error if send failure2 failed and no other 847 * error had been set during authentication. 848 */ 849 if (ret && !chap->error) 850 chap->error = ret; 851 } 852 853 int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid) 854 { 855 struct nvme_dhchap_queue_context *chap; 856 857 if (!ctrl->host_key) { 858 dev_warn(ctrl->device, "qid %d: no key\n", qid); 859 return -ENOKEY; 860 } 861 862 if (ctrl->opts->dhchap_ctrl_secret && !ctrl->ctrl_key) { 863 dev_warn(ctrl->device, "qid %d: invalid ctrl key\n", qid); 864 return -ENOKEY; 865 } 866 867 chap = &ctrl->dhchap_ctxs[qid]; 868 cancel_work_sync(&chap->auth_work); 869 queue_work(nvme_wq, &chap->auth_work); 870 return 0; 871 } 872 EXPORT_SYMBOL_GPL(nvme_auth_negotiate); 873 874 int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid) 875 { 876 struct nvme_dhchap_queue_context *chap; 877 int ret; 878 879 chap = &ctrl->dhchap_ctxs[qid]; 880 flush_work(&chap->auth_work); 881 ret = chap->error; 882 /* clear sensitive info */ 883 nvme_auth_reset_dhchap(chap); 884 return ret; 885 } 886 EXPORT_SYMBOL_GPL(nvme_auth_wait); 887 888 static void nvme_ctrl_auth_work(struct work_struct *work) 889 { 890 struct nvme_ctrl *ctrl = 891 container_of(work, struct nvme_ctrl, dhchap_auth_work); 892 int ret, q; 893 894 /* 895 * If the ctrl is no connected, bail as reconnect will handle 896 * authentication. 897 */ 898 if (ctrl->state != NVME_CTRL_LIVE) 899 return; 900 901 /* Authenticate admin queue first */ 902 ret = nvme_auth_negotiate(ctrl, 0); 903 if (ret) { 904 dev_warn(ctrl->device, 905 "qid 0: error %d setting up authentication\n", ret); 906 return; 907 } 908 ret = nvme_auth_wait(ctrl, 0); 909 if (ret) { 910 dev_warn(ctrl->device, 911 "qid 0: authentication failed\n"); 912 return; 913 } 914 915 for (q = 1; q < ctrl->queue_count; q++) { 916 ret = nvme_auth_negotiate(ctrl, q); 917 if (ret) { 918 dev_warn(ctrl->device, 919 "qid %d: error %d setting up authentication\n", 920 q, ret); 921 break; 922 } 923 } 924 925 /* 926 * Failure is a soft-state; credentials remain valid until 927 * the controller terminates the connection. 928 */ 929 for (q = 1; q < ctrl->queue_count; q++) { 930 ret = nvme_auth_wait(ctrl, q); 931 if (ret) 932 dev_warn(ctrl->device, 933 "qid %d: authentication failed\n", q); 934 } 935 } 936 937 int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl) 938 { 939 struct nvme_dhchap_queue_context *chap; 940 int i, ret; 941 942 mutex_init(&ctrl->dhchap_auth_mutex); 943 INIT_WORK(&ctrl->dhchap_auth_work, nvme_ctrl_auth_work); 944 if (!ctrl->opts) 945 return 0; 946 ret = nvme_auth_generate_key(ctrl->opts->dhchap_secret, 947 &ctrl->host_key); 948 if (ret) 949 return ret; 950 ret = nvme_auth_generate_key(ctrl->opts->dhchap_ctrl_secret, 951 &ctrl->ctrl_key); 952 if (ret) 953 goto err_free_dhchap_secret; 954 955 if (!ctrl->opts->dhchap_secret && !ctrl->opts->dhchap_ctrl_secret) 956 return ret; 957 958 ctrl->dhchap_ctxs = kvcalloc(ctrl_max_dhchaps(ctrl), 959 sizeof(*chap), GFP_KERNEL); 960 if (!ctrl->dhchap_ctxs) { 961 ret = -ENOMEM; 962 goto err_free_dhchap_ctrl_secret; 963 } 964 965 for (i = 0; i < ctrl_max_dhchaps(ctrl); i++) { 966 chap = &ctrl->dhchap_ctxs[i]; 967 chap->qid = i; 968 chap->ctrl = ctrl; 969 INIT_WORK(&chap->auth_work, nvme_queue_auth_work); 970 } 971 972 return 0; 973 err_free_dhchap_ctrl_secret: 974 nvme_auth_free_key(ctrl->ctrl_key); 975 ctrl->ctrl_key = NULL; 976 err_free_dhchap_secret: 977 nvme_auth_free_key(ctrl->host_key); 978 ctrl->host_key = NULL; 979 return ret; 980 } 981 EXPORT_SYMBOL_GPL(nvme_auth_init_ctrl); 982 983 void nvme_auth_stop(struct nvme_ctrl *ctrl) 984 { 985 cancel_work_sync(&ctrl->dhchap_auth_work); 986 } 987 EXPORT_SYMBOL_GPL(nvme_auth_stop); 988 989 void nvme_auth_free(struct nvme_ctrl *ctrl) 990 { 991 int i; 992 993 if (ctrl->dhchap_ctxs) { 994 for (i = 0; i < ctrl_max_dhchaps(ctrl); i++) 995 nvme_auth_free_dhchap(&ctrl->dhchap_ctxs[i]); 996 kfree(ctrl->dhchap_ctxs); 997 } 998 if (ctrl->host_key) { 999 nvme_auth_free_key(ctrl->host_key); 1000 ctrl->host_key = NULL; 1001 } 1002 if (ctrl->ctrl_key) { 1003 nvme_auth_free_key(ctrl->ctrl_key); 1004 ctrl->ctrl_key = NULL; 1005 } 1006 } 1007 EXPORT_SYMBOL_GPL(nvme_auth_free); 1008 1009 int __init nvme_init_auth(void) 1010 { 1011 nvme_chap_buf_cache = kmem_cache_create("nvme-chap-buf-cache", 1012 CHAP_BUF_SIZE, 0, SLAB_HWCACHE_ALIGN, NULL); 1013 if (!nvme_chap_buf_cache) 1014 return -ENOMEM; 1015 1016 nvme_chap_buf_pool = mempool_create(16, mempool_alloc_slab, 1017 mempool_free_slab, nvme_chap_buf_cache); 1018 if (!nvme_chap_buf_pool) 1019 goto err_destroy_chap_buf_cache; 1020 1021 return 0; 1022 err_destroy_chap_buf_cache: 1023 kmem_cache_destroy(nvme_chap_buf_cache); 1024 return -ENOMEM; 1025 } 1026 1027 void __exit nvme_exit_auth(void) 1028 { 1029 mempool_destroy(nvme_chap_buf_pool); 1030 kmem_cache_destroy(nvme_chap_buf_cache); 1031 } 1032