// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2020 Hannes Reinecke, SUSE Linux
 *
 * NVMe-oF in-band authentication (DH-HMAC-CHAP), host side.
 * Implements the negotiate/challenge/reply/success message exchange
 * on the admin and I/O queues, optional augmented challenges via a
 * Diffie-Hellman exchange, and (with 'concat') derivation of a TLS PSK
 * for secure channel concatenation.
 */

#include <linux/crc32.h>
#include <linux/base64.h>
#include <linux/prandom.h>
#include <linux/unaligned.h>
#include <crypto/hash.h>
#include <crypto/dh.h>
#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-auth.h>
#include <linux/nvme-keyring.h>

/*
 * One buffer holds an entire authentication payload; 4k is large enough
 * for the biggest supported DH value (ffdhe8192, see comment in
 * nvme_queue_auth_work()).
 */
#define CHAP_BUF_SIZE 4096
static struct kmem_cache *nvme_chap_buf_cache;
static mempool_t *nvme_chap_buf_pool;

/*
 * Per-queue authentication state. One context is allocated per queue
 * (see nvme_auth_init_ctrl()) and reused across re-authentications;
 * nvme_auth_reset_dhchap() wipes the per-transaction material while
 * keeping the allocated crypto transforms for reuse.
 */
struct nvme_dhchap_queue_context {
	struct list_head entry;
	struct work_struct auth_work;
	struct nvme_ctrl *ctrl;
	struct crypto_shash *shash_tfm;	/* negotiated HMAC */
	struct crypto_kpp *dh_tfm;	/* DH exchange, NULL for NULL DH group */
	struct nvme_dhchap_key *transformed_key;	/* cached transformed host key */
	void *buf;	/* payload buffer from nvme_chap_buf_pool */
	int qid;
	int error;	/* final outcome, read by nvme_auth_wait() */
	u32 s1;		/* controller sequence number (from challenge) */
	u32 s2;		/* host sequence number (sent in reply) */
	bool bi_directional;	/* controller authentication requested */
	bool authenticated;
	u16 transaction;
	u8 status;	/* DH-HMAC-CHAP failure reason code, 0 if none */
	u8 dhgroup_id;
	u8 hash_id;
	u8 sc_c;	/* SC_C value sent in negotiate, mixed into the response */
	size_t hash_len;
	u8 c1[NVME_AUTH_MAX_DIGEST_SIZE];	/* controller challenge */
	u8 c2[NVME_AUTH_MAX_DIGEST_SIZE];	/* host challenge */
	u8 response[NVME_AUTH_MAX_DIGEST_SIZE];
	u8 *ctrl_key;	/* controller DH public value */
	u8 *host_key;	/* host DH public value */
	u8 *sess_key;	/* DH shared secret */
	int ctrl_key_len;
	int host_key_len;
	int sess_key_len;
};

static struct workqueue_struct *nvme_auth_wq;

/* Upper bound on the number of queues, hence per-queue chap contexts. */
static inline int ctrl_max_dhchaps(struct nvme_ctrl *ctrl)
{
	return ctrl->opts->nr_io_queues + ctrl->opts->nr_write_queues +
		ctrl->opts->nr_poll_queues + 1;
}

/*
 * Submit one authentication payload as a fabrics Auth Send or Auth
 * Receive command on the given queue.
 *
 * qid 0 goes over the (blocking) fabrics queue; I/O queues use the
 * connect queue with a reserved tag and NOWAIT, since the regular tags
 * are not usable before the queue is authenticated.
 *
 * Returns 0 on success, a positive NVMe status or a negative errno on
 * failure (both logged; note the message always says "auth_send" even
 * for the receive direction).
 */
static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
		void *data, size_t data_len, bool auth_send)
{
	struct nvme_command cmd = {};
	nvme_submit_flags_t flags = NVME_SUBMIT_RETRY;
	struct request_queue *q = ctrl->fabrics_q;
	int ret;

	if (qid != 0) {
		flags |= NVME_SUBMIT_NOWAIT | NVME_SUBMIT_RESERVED;
		q = ctrl->connect_q;
	}

	cmd.auth_common.opcode = nvme_fabrics_command;
	cmd.auth_common.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER;
	cmd.auth_common.spsp0 = 0x01;
	cmd.auth_common.spsp1 = 0x01;
	if (auth_send) {
		cmd.auth_send.fctype = nvme_fabrics_type_auth_send;
		cmd.auth_send.tl = cpu_to_le32(data_len);
	} else {
		cmd.auth_receive.fctype = nvme_fabrics_type_auth_receive;
		cmd.auth_receive.al = cpu_to_le32(data_len);
	}

	ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, data_len,
				     qid == 0 ? NVME_QID_ANY : qid, flags);
	if (ret > 0)
		dev_warn(ctrl->device,
			"qid %d auth_send failed with status %d\n", qid, ret);
	else if (ret < 0)
		dev_err(ctrl->device,
			"qid %d auth_send failed with error %d\n", qid, ret);
	return ret;
}

/*
 * Validate the common header of a received payload: catch an explicit
 * AUTH_Failure1 from the controller (returning its reason code), then
 * check message type/id and transaction id.
 *
 * Returns 0 if the payload matches @expected_msg, otherwise a
 * DH-HMAC-CHAP failure reason code to report back.
 */
static int nvme_auth_receive_validate(struct nvme_ctrl *ctrl, int qid,
		struct nvmf_auth_dhchap_failure_data *data,
		u16 transaction, u8 expected_msg)
{
	dev_dbg(ctrl->device, "%s: qid %d auth_type %d auth_id %x\n",
		__func__, qid, data->auth_type, data->auth_id);

	if (data->auth_type == NVME_AUTH_COMMON_MESSAGES &&
	    data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
		return data->rescode_exp;
	}
	if (data->auth_type != NVME_AUTH_DHCHAP_MESSAGES ||
	    data->auth_id != expected_msg) {
		dev_warn(ctrl->device,
			 "qid %d invalid message %02x/%02x\n",
			 qid, data->auth_type, data->auth_id);
		return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
	}
	if (le16_to_cpu(data->t_id) != transaction) {
		dev_warn(ctrl->device,
			 "qid %d invalid transaction ID %d\n",
			 qid, le16_to_cpu(data->t_id));
		return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
	}
	return 0;
}

/*
 * Build the AUTH_Negotiate payload in chap->buf, advertising the
 * supported hashes (SHA-256/384/512) and DH groups (NULL plus the
 * ffdhe groups), and the secure-concatenation mode in SC_C.
 *
 * Returns the payload size, or -EINVAL (with chap->status set) if it
 * would not fit into the buffer.
 */
static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_negotiate_data *data = chap->buf;
	size_t size = sizeof(*data) + sizeof(union nvmf_auth_protocol);

	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}
	memset((u8 *)chap->buf, 0, size);
	data->auth_type = NVME_AUTH_COMMON_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
	data->t_id = cpu_to_le16(chap->transaction);
	/*
	 * Secure concatenation is only requested on the admin queue;
	 * REPLACETLSPSK vs NEWTLSPSK depends on whether a TLS key is
	 * already in place.
	 */
	if (ctrl->opts->concat && chap->qid == 0) {
		if (ctrl->opts->tls_key)
			data->sc_c = NVME_AUTH_SECP_REPLACETLSPSK;
		else
			data->sc_c = NVME_AUTH_SECP_NEWTLSPSK;
	} else
		data->sc_c = NVME_AUTH_SECP_NOSC;
	data->napd = 1;
	data->auth_protocol[0].dhchap.authid = NVME_AUTH_DHCHAP_AUTH_ID;
	data->auth_protocol[0].dhchap.halen = 3;
	data->auth_protocol[0].dhchap.dhlen = 6;
	data->auth_protocol[0].dhchap.idlist[0] = NVME_AUTH_HASH_SHA256;
	data->auth_protocol[0].dhchap.idlist[1] = NVME_AUTH_HASH_SHA384;
	data->auth_protocol[0].dhchap.idlist[2] = NVME_AUTH_HASH_SHA512;
	/* DH group ids start at fixed offset 30 in the id list */
	data->auth_protocol[0].dhchap.idlist[30] = NVME_AUTH_DHGROUP_NULL;
	data->auth_protocol[0].dhchap.idlist[31] = NVME_AUTH_DHGROUP_2048;
	data->auth_protocol[0].dhchap.idlist[32] = NVME_AUTH_DHGROUP_3072;
	data->auth_protocol[0].dhchap.idlist[33] = NVME_AUTH_DHGROUP_4096;
	data->auth_protocol[0].dhchap.idlist[34] = NVME_AUTH_DHGROUP_6144;
	data->auth_protocol[0].dhchap.idlist[35] = NVME_AUTH_DHGROUP_8192;

	/* Remember SC_C: it is mixed into the host response hash later */
	chap->sc_c = data->sc_c;

	return size;
}

/*
 * Parse the AUTH_Challenge payload: validate and select the hash and DH
 * group (reusing the existing transforms when they still match), then
 * stash the controller's sequence number, challenge value and, if
 * present, its DH public value.
 *
 * Returns 0 on success or a negative errno with chap->status set to the
 * corresponding DH-HMAC-CHAP failure reason.
 */
static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_challenge_data *data = chap->buf;
	u16 dhvlen = le16_to_cpu(data->dhvlen);
	size_t size = sizeof(*data) + data->hl + dhvlen;
	const char *gid_name = nvme_auth_dhgroup_name(data->dhgid);
	const char *hmac_name, *kpp_name;

	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}

	hmac_name = nvme_auth_hmac_name(data->hashid);
	if (!hmac_name) {
		dev_warn(ctrl->device,
			 "qid %d: invalid HASH ID %d\n",
			 chap->qid, data->hashid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		return -EPROTO;
	}

	if (chap->hash_id == data->hashid && chap->shash_tfm &&
	    !strcmp(crypto_shash_alg_name(chap->shash_tfm), hmac_name) &&
	    crypto_shash_digestsize(chap->shash_tfm) == data->hl) {
		dev_dbg(ctrl->device,
			"qid %d: reuse existing hash %s\n",
			chap->qid, hmac_name);
		goto select_kpp;
	}

	/* Reset if hash cannot be reused */
	if (chap->shash_tfm) {
		crypto_free_shash(chap->shash_tfm);
		chap->hash_id = 0;
		chap->hash_len = 0;
	}
	chap->shash_tfm = crypto_alloc_shash(hmac_name, 0,
					     CRYPTO_ALG_ALLOCATES_MEMORY);
	if (IS_ERR(chap->shash_tfm)) {
		dev_warn(ctrl->device,
			 "qid %d: failed to allocate hash %s, error %ld\n",
			 chap->qid, hmac_name, PTR_ERR(chap->shash_tfm));
		chap->shash_tfm = NULL;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return -ENOMEM;
	}

	/* The advertised hash length must match the digest size */
	if (crypto_shash_digestsize(chap->shash_tfm) != data->hl) {
		dev_warn(ctrl->device,
			 "qid %d: invalid hash length %d\n",
			 chap->qid, data->hl);
		crypto_free_shash(chap->shash_tfm);
		chap->shash_tfm = NULL;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		return -EPROTO;
	}

	chap->hash_id = data->hashid;
	chap->hash_len = data->hl;
	dev_dbg(ctrl->device, "qid %d: selected hash %s\n",
		chap->qid, hmac_name);

select_kpp:
	kpp_name = nvme_auth_dhgroup_kpp(data->dhgid);
	if (!kpp_name) {
		dev_warn(ctrl->device,
			 "qid %d: invalid DH group id %d\n",
			 chap->qid, data->dhgid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
		/* Leave previous dh_tfm intact */
		return -EPROTO;
	}

	if (chap->dhgroup_id == data->dhgid &&
	    (data->dhgid == NVME_AUTH_DHGROUP_NULL || chap->dh_tfm)) {
		dev_dbg(ctrl->device,
			"qid %d: reuse existing DH group %s\n",
			chap->qid, gid_name);
		goto skip_kpp;
	}

	/* Reset dh_tfm if it can't be reused */
	if (chap->dh_tfm) {
		crypto_free_kpp(chap->dh_tfm);
		chap->dh_tfm = NULL;
	}

	if (data->dhgid != NVME_AUTH_DHGROUP_NULL) {
		/* A non-NULL DH group requires a DH value */
		if (dhvlen == 0) {
			dev_warn(ctrl->device,
				 "qid %d: empty DH value\n",
				 chap->qid);
			chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
			return -EPROTO;
		}

		chap->dh_tfm = crypto_alloc_kpp(kpp_name, 0, 0);
		if (IS_ERR(chap->dh_tfm)) {
			int ret = PTR_ERR(chap->dh_tfm);

			dev_warn(ctrl->device,
				 "qid %d: error %d initializing DH group %s\n",
				 chap->qid, ret, gid_name);
			chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
			chap->dh_tfm = NULL;
			return ret;
		}
		dev_dbg(ctrl->device, "qid %d: selected DH group %s\n",
			chap->qid, gid_name);
	} else if (dhvlen != 0) {
		/* Conversely, NULL DH must not carry a DH value */
		dev_warn(ctrl->device,
			 "qid %d: invalid DH value for NULL DH\n",
			 chap->qid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EPROTO;
	}
	chap->dhgroup_id = data->dhgid;

skip_kpp:
	chap->s1 = le32_to_cpu(data->seqnum);
	memcpy(chap->c1, data->cval, chap->hash_len);
	if (dhvlen) {
		/* Controller DH public value follows the challenge value */
		chap->ctrl_key = kmalloc(dhvlen, GFP_KERNEL);
		if (!chap->ctrl_key) {
			chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
			return -ENOMEM;
		}
		chap->ctrl_key_len = dhvlen;
		memcpy(chap->ctrl_key, data->cval + chap->hash_len,
		       dhvlen);
		dev_dbg(ctrl->device, "ctrl public key %*ph\n",
			(int)chap->ctrl_key_len, chap->ctrl_key);
	}

	return 0;
}

/*
 * Build the AUTH_Reply payload: host response, optionally a host
 * challenge c2 (for bi-directional authentication or secure
 * concatenation) and the host DH public value.
 *
 * Returns the payload size, or -EINVAL (with chap->status set) if it
 * would not fit into the buffer.
 */
static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_reply_data *data = chap->buf;
	size_t size = sizeof(*data);

	size += 2 * chap->hash_len;

	if (chap->host_key_len)
		size += chap->host_key_len;

	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}

	memset(chap->buf, 0, size);
	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
	data->t_id = cpu_to_le16(chap->transaction);
	data->hl = chap->hash_len;
	data->dhvlen = cpu_to_le16(chap->host_key_len);
	memcpy(data->rval, chap->response, chap->hash_len);
	if (ctrl->ctrl_key)
		chap->bi_directional = true;
	/* c2 is also needed for concat, to derive the TLS PSK later */
	if (ctrl->ctrl_key || ctrl->opts->concat) {
		get_random_bytes(chap->c2, chap->hash_len);
		data->cvalid = 1;
		memcpy(data->rval + chap->hash_len, chap->c2,
		       chap->hash_len);
		dev_dbg(ctrl->device, "%s: qid %d ctrl challenge %*ph\n",
			__func__, chap->qid, (int)chap->hash_len, chap->c2);
	} else {
		memset(chap->c2, 0, chap->hash_len);
	}
	/* For concat no controller response is expected: s2 == 0 */
	if (ctrl->opts->concat) {
		chap->s2 = 0;
		chap->bi_directional = false;
	} else
		chap->s2 = nvme_auth_get_seqnum();
	data->seqnum = cpu_to_le32(chap->s2);
	if (chap->host_key_len) {
		dev_dbg(ctrl->device, "%s: qid %d host public key %*ph\n",
			__func__, chap->qid,
			chap->host_key_len, chap->host_key);
		memcpy(data->rval + 2 * chap->hash_len, chap->host_key,
		       chap->host_key_len);
	}

	return size;
}

/*
 * Process the AUTH_Success1 payload; if the controller sent a response
 * value (rvalid), compare it against the expected response computed in
 * nvme_auth_dhchap_setup_ctrl_response() (already in chap->response).
 *
 * Returns 0 on success, -EPROTO/-EINVAL on malformed payloads or
 * -ECONNREFUSED if the controller response does not match.
 */
static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_success1_data *data = chap->buf;
	size_t size = sizeof(*data) + chap->hash_len;

	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}

	if (data->hl != chap->hash_len) {
		dev_warn(ctrl->device,
			 "qid %d: invalid hash length %u\n",
			 chap->qid, data->hl);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		return -EPROTO;
	}

	/* Just print out information for the admin queue */
	if (chap->qid == 0)
		dev_info(ctrl->device,
			 "qid 0: authenticated with hash %s dhgroup %s\n",
			 nvme_auth_hmac_name(chap->hash_id),
			 nvme_auth_dhgroup_name(chap->dhgroup_id));

	if (!data->rvalid)
		return 0;

	/* Validate controller response */
	if (memcmp(chap->response, data->rval, data->hl)) {
		dev_dbg(ctrl->device, "%s: qid %d ctrl response %*ph\n",
			__func__, chap->qid, (int)chap->hash_len, data->rval);
		dev_dbg(ctrl->device, "%s: qid %d host response %*ph\n",
			__func__, chap->qid, (int)chap->hash_len,
			chap->response);
		dev_warn(ctrl->device,
			 "qid %d: controller authentication failed\n",
			 chap->qid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return -ECONNREFUSED;
	}

	/* Just print out information for the admin queue */
	if (chap->qid == 0)
		dev_info(ctrl->device,
			 "qid 0: controller authenticated\n");
	return 0;
}

/* Build the AUTH_Success2 payload; returns its size. */
static int nvme_auth_set_dhchap_success2_data(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_success2_data *data = chap->buf;
	size_t size = sizeof(*data);

	memset(chap->buf, 0, size);
	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
	data->t_id = cpu_to_le16(chap->transaction);

	return size;
}

/*
 * Build the AUTH_Failure2 payload carrying chap->status as the reason
 * code explanation; returns its size.
 */
static int nvme_auth_set_dhchap_failure2_data(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_failure_data *data = chap->buf;
	size_t size = sizeof(*data);

	memset(chap->buf, 0, size);
	data->auth_type = NVME_AUTH_COMMON_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
	data->t_id = cpu_to_le16(chap->transaction);
	data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
	data->rescode_exp = chap->status;

	return size;
}

/*
 * Compute the host response into chap->response:
 *   HMAC(transformed host key,
 *        challenge || s1 || transaction || SC_C || "HostHost" ||
 *        hostnqn || 0x00 || subsysnqn)
 * where the challenge is augmented with the DH session key when a DH
 * group was negotiated. The transformed host key is cached in the chap
 * context so a re-authentication can reuse it.
 *
 * Caller holds ctrl->dhchap_auth_mutex (ctrl->host_key is shared state).
 */
static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
	u8 buf[4], *challenge = chap->c1;
	int ret;

	dev_dbg(ctrl->device, "%s: qid %d host response seq %u transaction %d\n",
		__func__, chap->qid, chap->s1, chap->transaction);

	if (!chap->transformed_key) {
		chap->transformed_key = nvme_auth_transform_key(ctrl->host_key,
						ctrl->opts->host->nqn);
		if (IS_ERR(chap->transformed_key)) {
			ret = PTR_ERR(chap->transformed_key);
			chap->transformed_key = NULL;
			return ret;
		}
	} else {
		dev_dbg(ctrl->device, "%s: qid %d re-using host response\n",
			__func__, chap->qid);
	}

	ret = crypto_shash_setkey(chap->shash_tfm,
			chap->transformed_key->key, chap->transformed_key->len);
	if (ret) {
		dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
			 chap->qid, ret);
		goto out;
	}

	/* With a DH group the challenge is augmented with the session key */
	if (chap->dh_tfm) {
		challenge = kmalloc(chap->hash_len, GFP_KERNEL);
		if (!challenge) {
			ret = -ENOMEM;
			goto out;
		}
		ret = nvme_auth_augmented_challenge(chap->hash_id,
						    chap->sess_key,
						    chap->sess_key_len,
						    chap->c1, challenge,
						    chap->hash_len);
		if (ret)
			goto out;
	}

	shash->tfm = chap->shash_tfm;
	ret = crypto_shash_init(shash);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, challenge, chap->hash_len);
	if (ret)
		goto out;
	put_unaligned_le32(chap->s1, buf);
	ret = crypto_shash_update(shash, buf, 4);
	if (ret)
		goto out;
	put_unaligned_le16(chap->transaction, buf);
	ret = crypto_shash_update(shash, buf, 2);
	if (ret)
		goto out;
	*buf = chap->sc_c;
	ret = crypto_shash_update(shash, buf, 1);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, "HostHost", 8);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
				  strlen(ctrl->opts->host->nqn));
	if (ret)
		goto out;
	/* single NUL separator between the two NQNs */
	memset(buf, 0, sizeof(buf));
	ret = crypto_shash_update(shash, buf, 1);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
			strlen(ctrl->opts->subsysnqn));
	if (ret)
		goto out;
	ret = crypto_shash_final(shash, chap->response);
out:
	if (challenge != chap->c1)
		kfree(challenge);
	return ret;
}
/*
 * Compute the expected controller response into chap->response:
 *   HMAC(transformed ctrl key,
 *        challenge || s2 || transaction || 0x00 || "Controller" ||
 *        subsysnqn || 0x00 || hostnqn)
 * where the challenge (c2) is augmented with the DH session key when a
 * DH group was negotiated. Unlike the host key, the transformed
 * controller key is not cached and is freed before returning.
 *
 * Caller holds ctrl->dhchap_auth_mutex (ctrl->ctrl_key is shared state).
 */
static int nvme_auth_dhchap_setup_ctrl_response(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	SHASH_DESC_ON_STACK(shash, chap->shash_tfm);
	struct nvme_dhchap_key *transformed_key;
	u8 buf[4], *challenge = chap->c2;
	int ret;

	transformed_key = nvme_auth_transform_key(ctrl->ctrl_key,
				ctrl->opts->subsysnqn);
	if (IS_ERR(transformed_key)) {
		ret = PTR_ERR(transformed_key);
		return ret;
	}

	ret = crypto_shash_setkey(chap->shash_tfm,
			transformed_key->key, transformed_key->len);
	if (ret) {
		dev_warn(ctrl->device, "qid %d: failed to set key, error %d\n",
			 chap->qid, ret);
		goto out;
	}

	if (chap->dh_tfm) {
		challenge = kmalloc(chap->hash_len, GFP_KERNEL);
		if (!challenge) {
			ret = -ENOMEM;
			goto out;
		}
		ret = nvme_auth_augmented_challenge(chap->hash_id,
						    chap->sess_key,
						    chap->sess_key_len,
						    chap->c2, challenge,
						    chap->hash_len);
		if (ret)
			goto out;
	}
	dev_dbg(ctrl->device, "%s: qid %d ctrl response seq %u transaction %d\n",
		__func__, chap->qid, chap->s2, chap->transaction);
	dev_dbg(ctrl->device, "%s: qid %d challenge %*ph\n",
		__func__, chap->qid, (int)chap->hash_len, challenge);
	dev_dbg(ctrl->device, "%s: qid %d subsysnqn %s\n",
		__func__, chap->qid, ctrl->opts->subsysnqn);
	dev_dbg(ctrl->device, "%s: qid %d hostnqn %s\n",
		__func__, chap->qid, ctrl->opts->host->nqn);
	shash->tfm = chap->shash_tfm;
	ret = crypto_shash_init(shash);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, challenge, chap->hash_len);
	if (ret)
		goto out;
	put_unaligned_le32(chap->s2, buf);
	ret = crypto_shash_update(shash, buf, 4);
	if (ret)
		goto out;
	put_unaligned_le16(chap->transaction, buf);
	ret = crypto_shash_update(shash, buf, 2);
	if (ret)
		goto out;
	/* buf is zeroed here and reused as the NUL separator below */
	memset(buf, 0, 4);
	ret = crypto_shash_update(shash, buf, 1);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, "Controller", 10);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, ctrl->opts->subsysnqn,
				  strlen(ctrl->opts->subsysnqn));
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, buf, 1);
	if (ret)
		goto out;
	ret = crypto_shash_update(shash, ctrl->opts->host->nqn,
				  strlen(ctrl->opts->host->nqn));
	if (ret)
		goto out;
	ret = crypto_shash_final(shash, chap->response);
out:
	if (challenge != chap->c2)
		kfree(challenge);
	nvme_auth_free_key(transformed_key);
	return ret;
}

/*
 * Perform the host side of the DH exchange: generate a private/public
 * key pair (reusing an existing host key if present) and derive the
 * shared session key from the controller's public value.
 *
 * On failure chap->status is set to the reason reported back to the
 * controller and a negative errno is returned.
 */
static int nvme_auth_dhchap_exponential(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	int ret;

	if (chap->host_key && chap->host_key_len) {
		dev_dbg(ctrl->device,
			"qid %d: reusing host key\n", chap->qid);
		goto gen_sesskey;
	}
	ret = nvme_auth_gen_privkey(chap->dh_tfm, chap->dhgroup_id);
	if (ret < 0) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return ret;
	}

	chap->host_key_len = crypto_kpp_maxsize(chap->dh_tfm);

	chap->host_key = kzalloc(chap->host_key_len, GFP_KERNEL);
	if (!chap->host_key) {
		chap->host_key_len = 0;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return -ENOMEM;
	}
	ret = nvme_auth_gen_pubkey(chap->dh_tfm,
				   chap->host_key, chap->host_key_len);
	if (ret) {
		dev_dbg(ctrl->device,
			"failed to generate public key, error %d\n", ret);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return ret;
	}

gen_sesskey:
	/* Session key has the same size as the (public) host key */
	chap->sess_key_len = chap->host_key_len;
	chap->sess_key = kmalloc(chap->sess_key_len, GFP_KERNEL);
	if (!chap->sess_key) {
		chap->sess_key_len = 0;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return -ENOMEM;
	}

	ret = nvme_auth_gen_shared_secret(chap->dh_tfm,
					  chap->ctrl_key, chap->ctrl_key_len,
					  chap->sess_key, chap->sess_key_len);
	if (ret) {
		dev_dbg(ctrl->device,
			"failed to generate shared secret, error %d\n", ret);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return ret;
	}
	dev_dbg(ctrl->device, "shared secret %*ph\n",
		(int)chap->sess_key_len, chap->sess_key);
	return 0;
}

/*
 * Wipe all per-transaction material (keys, challenges, status) and
 * return the payload buffer to the mempool. The crypto transforms are
 * deliberately kept for reuse by the next authentication.
 */
static void nvme_auth_reset_dhchap(struct nvme_dhchap_queue_context *chap)
{
	nvme_auth_free_key(chap->transformed_key);
	chap->transformed_key = NULL;
	kfree_sensitive(chap->host_key);
	chap->host_key = NULL;
	chap->host_key_len = 0;
	kfree_sensitive(chap->ctrl_key);
	chap->ctrl_key = NULL;
	chap->ctrl_key_len = 0;
	kfree_sensitive(chap->sess_key);
	chap->sess_key = NULL;
	chap->sess_key_len = 0;
	chap->status = 0;
	chap->error = 0;
	chap->s1 = 0;
	chap->s2 = 0;
	chap->bi_directional = false;
	chap->transaction = 0;
	memset(chap->c1, 0, sizeof(chap->c1));
	memset(chap->c2, 0, sizeof(chap->c2));
	mempool_free(chap->buf, nvme_chap_buf_pool);
	chap->buf = NULL;
}

/* Final teardown of a chap context, including the crypto transforms. */
static void nvme_auth_free_dhchap(struct nvme_dhchap_queue_context *chap)
{
	nvme_auth_reset_dhchap(chap);
	chap->authenticated = false;
	if (chap->shash_tfm)
		crypto_free_shash(chap->shash_tfm);
	if (chap->dh_tfm)
		crypto_free_kpp(chap->dh_tfm);
}

/* Revoke and drop the generated TLS PSK (secure concatenation). */
void nvme_auth_revoke_tls_key(struct nvme_ctrl *ctrl)
{
	dev_dbg(ctrl->device, "Wipe generated TLS PSK %08x\n",
		key_serial(ctrl->opts->tls_key));
	key_revoke(ctrl->opts->tls_key);
	key_put(ctrl->opts->tls_key);
	ctrl->opts->tls_key = NULL;
}
EXPORT_SYMBOL_GPL(nvme_auth_revoke_tls_key);

/*
 * Secure channel concatenation: derive a TLS PSK from the DH session
 * key and both challenges, insert it into the keyring, and replace any
 * previously generated TLS key. Admin queue only; requires a completed
 * DH exchange (sess_key).
 */
static int nvme_auth_secure_concat(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	u8 *psk, *tls_psk;
	char *digest;
	struct key *tls_key;
	size_t psk_len;
	int ret = 0;

	if (!chap->sess_key) {
		dev_warn(ctrl->device,
			 "%s: qid %d no session key negotiated\n",
			 __func__, chap->qid);
		return -ENOKEY;
	}

	if (chap->qid) {
		dev_warn(ctrl->device,
			 "qid %d: secure concatenation not supported on I/O queues\n",
			 chap->qid);
		return -EINVAL;
	}
	ret = nvme_auth_generate_psk(chap->hash_id, chap->sess_key,
				     chap->sess_key_len,
				     chap->c1, chap->c2,
				     chap->hash_len, &psk, &psk_len);
	if (ret) {
		dev_warn(ctrl->device,
			 "%s: qid %d failed to generate PSK, error %d\n",
			 __func__, chap->qid, ret);
		return ret;
	}
	dev_dbg(ctrl->device,
		"%s: generated psk %*ph\n", __func__, (int)psk_len, psk);

	ret = nvme_auth_generate_digest(chap->hash_id, psk, psk_len,
					ctrl->opts->subsysnqn,
					ctrl->opts->host->nqn, &digest);
	if (ret) {
		dev_warn(ctrl->device,
			 "%s: qid %d failed to generate digest, error %d\n",
			 __func__, chap->qid, ret);
		goto out_free_psk;
	}
	dev_dbg(ctrl->device, "%s: generated digest %s\n",
		__func__, digest);
	ret = nvme_auth_derive_tls_psk(chap->hash_id, psk, psk_len,
				       digest, &tls_psk);
	if (ret) {
		dev_warn(ctrl->device,
			 "%s: qid %d failed to derive TLS psk, error %d\n",
			 __func__, chap->qid, ret);
		goto out_free_digest;
	}

	tls_key = nvme_tls_psk_refresh(ctrl->opts->keyring,
				       ctrl->opts->host->nqn,
				       ctrl->opts->subsysnqn, chap->hash_id,
				       tls_psk, psk_len, digest);
	if (IS_ERR(tls_key)) {
		ret = PTR_ERR(tls_key);
		dev_warn(ctrl->device,
			 "%s: qid %d failed to insert generated key, error %d\n",
			 __func__, chap->qid, ret);
		tls_key = NULL;
	}
	kfree_sensitive(tls_psk);
	/* Swap out a previously generated key for the new one */
	if (ctrl->opts->tls_key)
		nvme_auth_revoke_tls_key(ctrl);
	ctrl->opts->tls_key = tls_key;
out_free_digest:
	kfree_sensitive(digest);
out_free_psk:
	kfree_sensitive(psk);
	return ret;
}

/*
 * Per-queue authentication work item: runs the complete DH-HMAC-CHAP
 * state machine (negotiate -> challenge -> reply -> success1
 * [-> success2]) for one queue. The outcome is left in chap->error for
 * nvme_auth_wait(); on protocol failure an AUTH_Failure2 is sent via
 * the fail2 path.
 */
static void nvme_queue_auth_work(struct work_struct *work)
{
	struct nvme_dhchap_queue_context *chap =
		container_of(work, struct nvme_dhchap_queue_context, auth_work);
	struct nvme_ctrl *ctrl = chap->ctrl;
	size_t tl;
	int ret = 0;

	/*
	 * Allocate a large enough buffer for the entire negotiation:
	 * 4k is enough to ffdhe8192.
	 */
	chap->buf = mempool_alloc(nvme_chap_buf_pool, GFP_KERNEL);
	if (!chap->buf) {
		chap->error = -ENOMEM;
		return;
	}

	chap->transaction = ctrl->transaction++;

	/* DH-HMAC-CHAP Step 1: send negotiate */
	dev_dbg(ctrl->device, "%s: qid %d send negotiate\n",
		__func__, chap->qid);
	ret = nvme_auth_set_dhchap_negotiate_data(ctrl, chap);
	if (ret < 0) {
		chap->error = ret;
		return;
	}
	tl = ret;
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
	if (ret) {
		chap->error = ret;
		return;
	}

	/* DH-HMAC-CHAP Step 2: receive challenge */
	dev_dbg(ctrl->device, "%s: qid %d receive challenge\n",
		__func__, chap->qid);

	memset(chap->buf, 0, CHAP_BUF_SIZE);
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, CHAP_BUF_SIZE,
			       false);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid %d failed to receive challenge, %s %d\n",
			 chap->qid, ret < 0 ? "error" : "nvme status", ret);
		chap->error = ret;
		return;
	}
	ret = nvme_auth_receive_validate(ctrl, chap->qid, chap->buf, chap->transaction,
					 NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE);
	if (ret) {
		chap->status = ret;
		chap->error = -EKEYREJECTED;
		return;
	}

	ret = nvme_auth_process_dhchap_challenge(ctrl, chap);
	if (ret) {
		/* Invalid challenge parameters */
		chap->error = ret;
		goto fail2;
	}

	/* Controller sent a DH value: run the DH exchange */
	if (chap->ctrl_key_len) {
		dev_dbg(ctrl->device,
			"%s: qid %d DH exponential\n",
			__func__, chap->qid);
		ret = nvme_auth_dhchap_exponential(ctrl, chap);
		if (ret) {
			chap->error = ret;
			goto fail2;
		}
	}

	dev_dbg(ctrl->device, "%s: qid %d host response\n",
		__func__, chap->qid);
	mutex_lock(&ctrl->dhchap_auth_mutex);
	ret = nvme_auth_dhchap_setup_host_response(ctrl, chap);
	mutex_unlock(&ctrl->dhchap_auth_mutex);
	if (ret) {
		chap->error = ret;
		goto fail2;
	}

	/* DH-HMAC-CHAP Step 3: send reply */
	dev_dbg(ctrl->device, "%s: qid %d send reply\n",
		__func__, chap->qid);
	ret = nvme_auth_set_dhchap_reply_data(ctrl, chap);
	if (ret < 0) {
		chap->error = ret;
		goto fail2;
	}

	tl = ret;
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
	if (ret) {
		chap->error = ret;
		goto fail2;
	}

	/* DH-HMAC-CHAP Step 4: receive success1 */
	dev_dbg(ctrl->device, "%s: qid %d receive success1\n",
		__func__, chap->qid);

	memset(chap->buf, 0, CHAP_BUF_SIZE);
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, CHAP_BUF_SIZE,
			       false);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid %d failed to receive success1, %s %d\n",
			 chap->qid, ret < 0 ? "error" : "nvme status", ret);
		chap->error = ret;
		return;
	}
	ret = nvme_auth_receive_validate(ctrl, chap->qid,
					 chap->buf, chap->transaction,
					 NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1);
	if (ret) {
		chap->status = ret;
		chap->error = -EKEYREJECTED;
		return;
	}

	/* Compute the expected controller response before validating it */
	mutex_lock(&ctrl->dhchap_auth_mutex);
	if (ctrl->ctrl_key) {
		dev_dbg(ctrl->device,
			"%s: qid %d controller response\n",
			__func__, chap->qid);
		ret = nvme_auth_dhchap_setup_ctrl_response(ctrl, chap);
		if (ret) {
			mutex_unlock(&ctrl->dhchap_auth_mutex);
			chap->error = ret;
			goto fail2;
		}
	}
	mutex_unlock(&ctrl->dhchap_auth_mutex);

	ret = nvme_auth_process_dhchap_success1(ctrl, chap);
	if (ret) {
		/* Controller authentication failed */
		chap->error = -EKEYREJECTED;
		goto fail2;
	}

	if (chap->bi_directional) {
		/* DH-HMAC-CHAP Step 5: send success2 */
		dev_dbg(ctrl->device, "%s: qid %d send success2\n",
			__func__, chap->qid);
		tl = nvme_auth_set_dhchap_success2_data(ctrl, chap);
		ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
		if (ret)
			chap->error = ret;
	}
	if (!ret) {
		chap->error = 0;
		chap->authenticated = true;
		if (ctrl->opts->concat &&
		    (ret = nvme_auth_secure_concat(ctrl, chap))) {
			dev_warn(ctrl->device,
				 "%s: qid %d failed to enable secure concatenation\n",
				 __func__, chap->qid);
			chap->error = ret;
			chap->authenticated = false;
		}
		return;
	}

fail2:
	if (chap->status == 0)
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
	dev_dbg(ctrl->device, "%s: qid %d send failure2, status %x\n",
		__func__, chap->qid, chap->status);
	tl = nvme_auth_set_dhchap_failure2_data(ctrl, chap);
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
	/*
	 * only update error if send failure2 failed and no other
	 * error had been set during authentication.
	 */
	if (ret && !chap->error)
		chap->error = ret;
}

/*
 * Kick off authentication for one queue. Returns -ENOKEY when the
 * required keys are missing; the actual outcome is collected with
 * nvme_auth_wait().
 */
int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
{
	struct nvme_dhchap_queue_context *chap;

	if (!ctrl->host_key) {
		dev_warn(ctrl->device, "qid %d: no key\n", qid);
		return -ENOKEY;
	}

	if (ctrl->opts->dhchap_ctrl_secret && !ctrl->ctrl_key) {
		dev_warn(ctrl->device, "qid %d: invalid ctrl key\n", qid);
		return -ENOKEY;
	}

	chap = &ctrl->dhchap_ctxs[qid];
	cancel_work_sync(&chap->auth_work);
	queue_work(nvme_auth_wq, &chap->auth_work);
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_auth_negotiate);

/*
 * Wait for a queue's authentication to complete and return its result;
 * the per-transaction material is wiped afterwards.
 */
int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
{
	struct nvme_dhchap_queue_context *chap;
	int ret;

	chap = &ctrl->dhchap_ctxs[qid];
	flush_work(&chap->auth_work);
	ret = chap->error;
	/* clear sensitive info */
	nvme_auth_reset_dhchap(chap);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_auth_wait);

/*
 * Controller-level re-authentication work: authenticate the admin queue
 * synchronously, then re-authenticate all previously authenticated I/O
 * queues in parallel.
 */
static void nvme_ctrl_auth_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, dhchap_auth_work);
	int ret, q;

	/*
	 * If the ctrl is not connected, bail as reconnect will handle
	 * authentication.
	 */
	if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
		return;

	/* Authenticate admin queue first */
	ret = nvme_auth_negotiate(ctrl, 0);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid 0: error %d setting up authentication\n", ret);
		return;
	}
	ret = nvme_auth_wait(ctrl, 0);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid 0: authentication failed\n");
		return;
	}
	/*
	 * Only run authentication on the admin queue for secure concatenation.
	 */
	if (ctrl->opts->concat)
		return;

	for (q = 1; q < ctrl->queue_count; q++) {
		struct nvme_dhchap_queue_context *chap =
			&ctrl->dhchap_ctxs[q];
		/*
		 * Skip re-authentication if the queue had
		 * not been authenticated initially.
		 */
		if (!chap->authenticated)
			continue;
		cancel_work_sync(&chap->auth_work);
		queue_work(nvme_auth_wq, &chap->auth_work);
	}

	/*
	 * Failure is a soft-state; credentials remain valid until
	 * the controller terminates the connection.
	 */
	for (q = 1; q < ctrl->queue_count; q++) {
		struct nvme_dhchap_queue_context *chap =
			&ctrl->dhchap_ctxs[q];
		if (!chap->authenticated)
			continue;
		flush_work(&chap->auth_work);
		ret = chap->error;
		nvme_auth_reset_dhchap(chap);
		if (ret)
			dev_warn(ctrl->device,
				 "qid %d: authentication failed\n", q);
	}
}

/*
 * Set up the controller's authentication state: generate the host (and
 * optional controller) keys from the connect options and allocate one
 * chap context per possible queue. A controller without dhchap secrets
 * gets no contexts.
 */
int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
{
	struct nvme_dhchap_queue_context *chap;
	int i, ret;

	mutex_init(&ctrl->dhchap_auth_mutex);
	INIT_WORK(&ctrl->dhchap_auth_work, nvme_ctrl_auth_work);
	if (!ctrl->opts)
		return 0;
	ret = nvme_auth_generate_key(ctrl->opts->dhchap_secret,
			&ctrl->host_key);
	if (ret)
		return ret;
	ret = nvme_auth_generate_key(ctrl->opts->dhchap_ctrl_secret,
			&ctrl->ctrl_key);
	if (ret)
		goto err_free_dhchap_secret;

	if (!ctrl->opts->dhchap_secret && !ctrl->opts->dhchap_ctrl_secret)
		return 0;

	ctrl->dhchap_ctxs = kvzalloc_objs(*chap, ctrl_max_dhchaps(ctrl));
	if (!ctrl->dhchap_ctxs) {
		ret = -ENOMEM;
		goto err_free_dhchap_ctrl_secret;
	}

	for (i = 0; i < ctrl_max_dhchaps(ctrl); i++) {
		chap = &ctrl->dhchap_ctxs[i];
		chap->qid = i;
		chap->ctrl = ctrl;
		chap->authenticated = false;
		INIT_WORK(&chap->auth_work, nvme_queue_auth_work);
	}

	return 0;
err_free_dhchap_ctrl_secret:
	nvme_auth_free_key(ctrl->ctrl_key);
	ctrl->ctrl_key = NULL;
err_free_dhchap_secret:
	nvme_auth_free_key(ctrl->host_key);
	ctrl->host_key = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_auth_init_ctrl);

/* Cancel any in-flight controller-level re-authentication. */
void nvme_auth_stop(struct nvme_ctrl *ctrl)
{
	cancel_work_sync(&ctrl->dhchap_auth_work);
}
EXPORT_SYMBOL_GPL(nvme_auth_stop);

/* Tear down all authentication state for the controller. */
void nvme_auth_free(struct nvme_ctrl *ctrl)
{
	int i;

	if (ctrl->dhchap_ctxs) {
		for (i = 0; i < ctrl_max_dhchaps(ctrl); i++)
			nvme_auth_free_dhchap(&ctrl->dhchap_ctxs[i]);
		kvfree(ctrl->dhchap_ctxs);
	}
	if (ctrl->host_key) {
		nvme_auth_free_key(ctrl->host_key);
		ctrl->host_key = NULL;
	}
	if (ctrl->ctrl_key) {
		nvme_auth_free_key(ctrl->ctrl_key);
		ctrl->ctrl_key = NULL;
	}
}
EXPORT_SYMBOL_GPL(nvme_auth_free);

/* Module init: workqueue, payload buffer cache and mempool. */
int __init nvme_init_auth(void)
{
	nvme_auth_wq = alloc_workqueue("nvme-auth-wq",
			       WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_auth_wq)
		return -ENOMEM;

	nvme_chap_buf_cache = kmem_cache_create("nvme-chap-buf-cache",
				CHAP_BUF_SIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
	if (!nvme_chap_buf_cache)
		goto err_destroy_workqueue;

	nvme_chap_buf_pool = mempool_create(16, mempool_alloc_slab,
			mempool_free_slab, nvme_chap_buf_cache);
	if (!nvme_chap_buf_pool)
		goto err_destroy_chap_buf_cache;

	return 0;
err_destroy_chap_buf_cache:
	kmem_cache_destroy(nvme_chap_buf_cache);
err_destroy_workqueue:
	destroy_workqueue(nvme_auth_wq);
	return -ENOMEM;
}

/* Module exit: release in reverse order of nvme_init_auth(). */
void __exit nvme_exit_auth(void)
{
	mempool_destroy(nvme_chap_buf_pool);
	kmem_cache_destroy(nvme_chap_buf_cache);
	destroy_workqueue(nvme_auth_wq);
}