// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2020 Hannes Reinecke, SUSE Linux
 */

#include <linux/crc32.h>
#include <linux/base64.h>
#include <linux/prandom.h>
#include <linux/unaligned.h>
#include <crypto/dh.h>
#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-auth.h>
#include <linux/nvme-keyring.h>

/* Negotiation buffer size; large enough for any DH-HMAC-CHAP payload */
#define CHAP_BUF_SIZE 4096
static struct kmem_cache *nvme_chap_buf_cache;
static mempool_t *nvme_chap_buf_pool;

/*
 * Per-queue DH-HMAC-CHAP negotiation state.
 * One context is allocated per queue (see nvme_auth_init_ctrl()) and is
 * reset between authentication attempts by nvme_auth_reset_dhchap().
 */
struct nvme_dhchap_queue_context {
	struct list_head entry;
	struct work_struct auth_work;
	struct nvme_ctrl *ctrl;
	struct crypto_kpp *dh_tfm;
	struct nvme_dhchap_key *transformed_key;
	void *buf;		/* message buffer from nvme_chap_buf_pool */
	int qid;
	int error;		/* final outcome of the handshake */
	u32 s1;			/* sequence number from the controller */
	u32 s2;			/* sequence number generated by the host */
	bool bi_directional;
	bool authenticated;
	u16 transaction;
	u8 status;		/* DH-HMAC-CHAP failure reason code */
	u8 dhgroup_id;
	u8 hash_id;
	u8 sc_c;		/* secure channel concatenation indicator */
	size_t hash_len;
	u8 c1[NVME_AUTH_MAX_DIGEST_SIZE];	/* controller challenge */
	u8 c2[NVME_AUTH_MAX_DIGEST_SIZE];	/* host challenge */
	u8 response[NVME_AUTH_MAX_DIGEST_SIZE];
	u8 *ctrl_key;		/* controller public DH value */
	u8 *host_key;		/* host public DH value */
	u8 *sess_key;		/* DH shared secret */
	int ctrl_key_len;
	int host_key_len;
	int sess_key_len;
};

static struct workqueue_struct *nvme_auth_wq;

/* Number of per-queue contexts needed: all I/O queues plus the admin queue */
static inline int ctrl_max_dhchaps(struct nvme_ctrl *ctrl)
{
	return ctrl->opts->nr_io_queues + ctrl->opts->nr_write_queues +
			ctrl->opts->nr_poll_queues + 1;
}

/*
 * Send (auth_send=true) or receive (auth_send=false) one authentication
 * message on @qid. Returns 0 on success, a positive NVMe status or a
 * negative errno on failure.
 */
static int nvme_auth_submit(struct nvme_ctrl *ctrl, int qid,
			    void *data, size_t data_len, bool auth_send)
{
	struct nvme_command cmd = {};
	nvme_submit_flags_t flags = NVME_SUBMIT_RETRY;
	struct request_queue *q = ctrl->fabrics_q;
	int ret;

	if (qid != 0) {
		flags |= NVME_SUBMIT_NOWAIT | NVME_SUBMIT_RESERVED;
		q = ctrl->connect_q;
	}

	cmd.auth_common.opcode = nvme_fabrics_command;
	cmd.auth_common.secp = NVME_AUTH_DHCHAP_PROTOCOL_IDENTIFIER;
	cmd.auth_common.spsp0 = 0x01;
	cmd.auth_common.spsp1 = 0x01;
	if (auth_send) {
		cmd.auth_send.fctype = nvme_fabrics_type_auth_send;
		cmd.auth_send.tl = cpu_to_le32(data_len);
	} else {
		cmd.auth_receive.fctype = nvme_fabrics_type_auth_receive;
		cmd.auth_receive.al = cpu_to_le32(data_len);
	}

	ret = __nvme_submit_sync_cmd(q, &cmd, NULL, data, data_len,
				     qid == 0 ? NVME_QID_ANY : qid, flags);
	if (ret > 0)
		dev_warn(ctrl->device,
			"qid %d auth_send failed with status %d\n", qid, ret);
	else if (ret < 0)
		dev_err(ctrl->device,
			"qid %d auth_send failed with error %d\n", qid, ret);
	return ret;
}

/*
 * Validate a received message header: detect an in-band FAILURE1 message
 * (returns its reason code), and check message type and transaction id
 * against what we expect next in the handshake.
 */
static int nvme_auth_receive_validate(struct nvme_ctrl *ctrl, int qid,
		struct nvmf_auth_dhchap_failure_data *data,
		u16 transaction, u8 expected_msg)
{
	dev_dbg(ctrl->device, "%s: qid %d auth_type %d auth_id %x\n",
		__func__, qid, data->auth_type, data->auth_id);

	if (data->auth_type == NVME_AUTH_COMMON_MESSAGES &&
	    data->auth_id == NVME_AUTH_DHCHAP_MESSAGE_FAILURE1) {
		return data->rescode_exp;
	}
	if (data->auth_type != NVME_AUTH_DHCHAP_MESSAGES ||
	    data->auth_id != expected_msg) {
		dev_warn(ctrl->device,
			 "qid %d invalid message %02x/%02x\n",
			 qid, data->auth_type, data->auth_id);
		return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
	}
	if (le16_to_cpu(data->t_id) != transaction) {
		dev_warn(ctrl->device,
			 "qid %d invalid transaction ID %d\n",
			 qid, le16_to_cpu(data->t_id));
		return NVME_AUTH_DHCHAP_FAILURE_INCORRECT_MESSAGE;
	}
	return 0;
}

/*
 * Build the AUTH_Negotiate message: advertise supported hashes and DH
 * groups, and request secure channel concatenation on the admin queue
 * when configured. The NULL DH group is only offered when concatenation
 * is not requested (a session key is mandatory for concatenation).
 * Returns the payload size, or a negative errno.
 */
static int nvme_auth_set_dhchap_negotiate_data(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_negotiate_data *data = chap->buf;
	size_t size = sizeof(*data) + sizeof(union nvmf_auth_protocol);
	u8 dh_list_offset = NVME_AUTH_DHCHAP_MAX_DH_IDS;
	u8 *idlist = data->auth_protocol[0].dhchap.idlist;

	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}
	memset((u8 *)chap->buf, 0, size);
	data->auth_type = NVME_AUTH_COMMON_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_NEGOTIATE;
	data->t_id = cpu_to_le16(chap->transaction);
	if (ctrl->opts->concat && chap->qid == 0) {
		if (ctrl->opts->tls_key)
			data->sc_c = NVME_AUTH_SECP_REPLACETLSPSK;
		else
			data->sc_c = NVME_AUTH_SECP_NEWTLSPSK;
	} else
		data->sc_c = NVME_AUTH_SECP_NOSC;
	chap->sc_c = data->sc_c;
	data->napd = 1;
	data->auth_protocol[0].dhchap.authid = NVME_AUTH_DHCHAP_AUTH_ID;
	data->auth_protocol[0].dhchap.halen = 3;
	idlist[0] = NVME_AUTH_HASH_SHA256;
	idlist[1] = NVME_AUTH_HASH_SHA384;
	idlist[2] = NVME_AUTH_HASH_SHA512;
	if (chap->sc_c == NVME_AUTH_SECP_NOSC)
		idlist[dh_list_offset++] = NVME_AUTH_DHGROUP_NULL;
	idlist[dh_list_offset++] = NVME_AUTH_DHGROUP_2048;
	idlist[dh_list_offset++] = NVME_AUTH_DHGROUP_3072;
	idlist[dh_list_offset++] = NVME_AUTH_DHGROUP_4096;
	idlist[dh_list_offset++] = NVME_AUTH_DHGROUP_6144;
	idlist[dh_list_offset++] = NVME_AUTH_DHGROUP_8192;
	data->auth_protocol[0].dhchap.dhlen =
		dh_list_offset - NVME_AUTH_DHCHAP_MAX_DH_IDS;

	return size;
}

/*
 * Parse the AUTH_Challenge message: select (or reuse) the hash and DH
 * group chosen by the controller, allocate the KPP transform if needed,
 * and stash the controller challenge, sequence number and public DH
 * value. Sets chap->status with the protocol reason code on failure.
 */
static int nvme_auth_process_dhchap_challenge(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_challenge_data *data = chap->buf;
	u16 dhvlen = le16_to_cpu(data->dhvlen);
	size_t size = sizeof(*data) + data->hl + dhvlen;
	const char *gid_name = nvme_auth_dhgroup_name(data->dhgid);
	const char *hmac_name, *kpp_name;

	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}

	hmac_name = nvme_auth_hmac_name(data->hashid);
	if (!hmac_name) {
		dev_warn(ctrl->device,
			 "qid %d: invalid HASH ID %d\n",
			 chap->qid, data->hashid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		return -EPROTO;
	}

	if (chap->hash_id == data->hashid && chap->hash_len == data->hl) {
		dev_dbg(ctrl->device,
			"qid %d: reuse existing hash %s\n",
			chap->qid, hmac_name);
		goto select_kpp;
	}

	if (nvme_auth_hmac_hash_len(data->hashid) != data->hl) {
		dev_warn(ctrl->device,
			 "qid %d: invalid hash length %d\n",
			 chap->qid, data->hl);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		return -EPROTO;
	}

	chap->hash_id = data->hashid;
	chap->hash_len = data->hl;
	dev_dbg(ctrl->device, "qid %d: selected hash %s\n",
		chap->qid, hmac_name);

select_kpp:
	kpp_name = nvme_auth_dhgroup_kpp(data->dhgid);
	if (!kpp_name) {
		dev_warn(ctrl->device,
			 "qid %d: invalid DH group id %d\n",
			 chap->qid, data->dhgid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
		/* Leave previous dh_tfm intact */
		return -EPROTO;
	}

	if (chap->dhgroup_id == data->dhgid &&
	    (data->dhgid == NVME_AUTH_DHGROUP_NULL || chap->dh_tfm)) {
		dev_dbg(ctrl->device,
			"qid %d: reuse existing DH group %s\n",
			chap->qid, gid_name);
		goto skip_kpp;
	}

	/* Reset dh_tfm if it can't be reused */
	if (chap->dh_tfm) {
		crypto_free_kpp(chap->dh_tfm);
		chap->dh_tfm = NULL;
	}

	if (data->dhgid != NVME_AUTH_DHGROUP_NULL) {
		if (dhvlen == 0) {
			dev_warn(ctrl->device,
				 "qid %d: empty DH value\n",
				 chap->qid);
			chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
			return -EPROTO;
		}

		chap->dh_tfm = crypto_alloc_kpp(kpp_name, 0, 0);
		if (IS_ERR(chap->dh_tfm)) {
			int ret = PTR_ERR(chap->dh_tfm);

			dev_warn(ctrl->device,
				 "qid %d: error %d initializing DH group %s\n",
				 chap->qid, ret, gid_name);
			chap->status = NVME_AUTH_DHCHAP_FAILURE_DHGROUP_UNUSABLE;
			chap->dh_tfm = NULL;
			return ret;
		}
		dev_dbg(ctrl->device, "qid %d: selected DH group %s\n",
			chap->qid, gid_name);
	} else if (dhvlen != 0) {
		dev_warn(ctrl->device,
			 "qid %d: invalid DH value for NULL DH\n",
			 chap->qid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EPROTO;
	}
	chap->dhgroup_id = data->dhgid;

skip_kpp:
	chap->s1 = le32_to_cpu(data->seqnum);
	memcpy(chap->c1, data->cval, chap->hash_len);
	if (dhvlen) {
		/* Controller public DH value follows the challenge value */
		chap->ctrl_key = kmalloc(dhvlen, GFP_KERNEL);
		if (!chap->ctrl_key) {
			chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
			return -ENOMEM;
		}
		chap->ctrl_key_len = dhvlen;
		memcpy(chap->ctrl_key, data->cval + chap->hash_len,
		       dhvlen);
		dev_dbg(ctrl->device, "ctrl public key %*ph\n",
			 (int)chap->ctrl_key_len, chap->ctrl_key);
	}

	return 0;
}

/*
 * Build the AUTH_Reply message: host response, an optional host
 * challenge (for bi-directional auth or secure concatenation), our
 * sequence number and the host public DH value.
 * Returns the payload size, or a negative errno.
 */
static int nvme_auth_set_dhchap_reply_data(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_reply_data *data = chap->buf;
	size_t size = sizeof(*data);

	size += 2 * chap->hash_len;

	if (chap->host_key_len)
		size += chap->host_key_len;

	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}

	memset(chap->buf, 0, size);
	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_REPLY;
	data->t_id = cpu_to_le16(chap->transaction);
	data->hl = chap->hash_len;
	data->dhvlen = cpu_to_le16(chap->host_key_len);
	memcpy(data->rval, chap->response, chap->hash_len);
	if (ctrl->ctrl_key)
		chap->bi_directional = true;
	if (ctrl->ctrl_key || ctrl->opts->concat) {
		get_random_bytes(chap->c2, chap->hash_len);
		data->cvalid = 1;
		memcpy(data->rval + chap->hash_len, chap->c2,
		       chap->hash_len);
		dev_dbg(ctrl->device, "%s: qid %d ctrl challenge %*ph\n",
			__func__, chap->qid, (int)chap->hash_len, chap->c2);
	} else {
		memset(chap->c2, 0, chap->hash_len);
	}
	if (ctrl->opts->concat) {
		/* Concatenation uses s2 == 0 and no controller response */
		chap->s2 = 0;
		chap->bi_directional = false;
	} else
		chap->s2 = nvme_auth_get_seqnum();
	data->seqnum = cpu_to_le32(chap->s2);
	if (chap->host_key_len) {
		dev_dbg(ctrl->device, "%s: qid %d host public key %*ph\n",
			__func__, chap->qid,
			chap->host_key_len, chap->host_key);
		memcpy(data->rval + 2 * chap->hash_len, chap->host_key,
		       chap->host_key_len);
	}

	return size;
}

/*
 * Process the AUTH_Success1 message and, if the controller supplied a
 * response value, verify it against our precomputed expected response.
 */
static int nvme_auth_process_dhchap_success1(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_success1_data *data = chap->buf;
	size_t size = sizeof(*data) + chap->hash_len;

	if (size > CHAP_BUF_SIZE) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return -EINVAL;
	}

	if (data->hl != chap->hash_len) {
		dev_warn(ctrl->device,
			 "qid %d: invalid hash length %u\n",
			 chap->qid, data->hl);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_HASH_UNUSABLE;
		return -EPROTO;
	}

	/* Just print out information for the admin queue */
	if (chap->qid == 0)
		dev_info(ctrl->device,
			 "qid 0: authenticated with hash %s dhgroup %s\n",
			 nvme_auth_hmac_name(chap->hash_id),
			 nvme_auth_dhgroup_name(chap->dhgroup_id));

	if (!data->rvalid)
		return 0;

	/* Validate controller response */
	if (memcmp(chap->response, data->rval, data->hl)) {
		dev_dbg(ctrl->device, "%s: qid %d ctrl response %*ph\n",
			__func__, chap->qid, (int)chap->hash_len, data->rval);
		dev_dbg(ctrl->device, "%s: qid %d host response %*ph\n",
			__func__, chap->qid, (int)chap->hash_len,
			chap->response);
		dev_warn(ctrl->device,
			 "qid %d: controller authentication failed\n",
			 chap->qid);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return -ECONNREFUSED;
	}

	/* Just print out information for the admin queue */
	if (chap->qid == 0)
		dev_info(ctrl->device,
			 "qid 0: controller authenticated\n");
	return 0;
}

/* Build the AUTH_Success2 message acknowledging the controller response */
static int nvme_auth_set_dhchap_success2_data(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_success2_data *data = chap->buf;
	size_t size = sizeof(*data);

	memset(chap->buf, 0, size);
	data->auth_type = NVME_AUTH_DHCHAP_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_SUCCESS2;
	data->t_id = cpu_to_le16(chap->transaction);

	return size;
}

/* Build the AUTH_Failure2 message carrying the saved failure reason */
static int nvme_auth_set_dhchap_failure2_data(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvmf_auth_dhchap_failure_data *data = chap->buf;
	size_t size = sizeof(*data);

	memset(chap->buf, 0, size);
	data->auth_type = NVME_AUTH_COMMON_MESSAGES;
	data->auth_id = NVME_AUTH_DHCHAP_MESSAGE_FAILURE2;
	data->t_id = cpu_to_le16(chap->transaction);
	data->rescode = NVME_AUTH_DHCHAP_FAILURE_REASON_FAILED;
	data->rescode_exp = chap->status;

	return size;
}

/*
 * Compute the host response into chap->response:
 * HMAC(transformed host key, challenge || s1 || transaction || sc_c ||
 *      "HostHost" || hostnqn || 0x00 || subsysnqn).
 * When a DH group is in use the challenge is first augmented with the
 * session key. The transformed key is cached across retries.
 */
static int nvme_auth_dhchap_setup_host_response(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvme_auth_hmac_ctx hmac;
	u8 buf[4], *challenge = chap->c1;
	int ret;

	dev_dbg(ctrl->device, "%s: qid %d host response seq %u transaction %d\n",
		__func__, chap->qid, chap->s1, chap->transaction);

	if (!chap->transformed_key) {
		chap->transformed_key = nvme_auth_transform_key(ctrl->host_key,
						ctrl->opts->host->nqn);
		if (IS_ERR(chap->transformed_key)) {
			ret = PTR_ERR(chap->transformed_key);
			chap->transformed_key = NULL;
			return ret;
		}
	} else {
		dev_dbg(ctrl->device, "%s: qid %d re-using host response\n",
			__func__, chap->qid);
	}

	ret = nvme_auth_hmac_init(&hmac, chap->hash_id,
				  chap->transformed_key->key,
				  chap->transformed_key->len);
	if (ret)
		goto out;

	if (chap->dh_tfm) {
		challenge = kmalloc(chap->hash_len, GFP_KERNEL);
		if (!challenge) {
			ret = -ENOMEM;
			goto out;
		}
		ret = nvme_auth_augmented_challenge(chap->hash_id,
						    chap->sess_key,
						    chap->sess_key_len,
						    chap->c1, challenge,
						    chap->hash_len);
		if (ret)
			goto out;
	}

	nvme_auth_hmac_update(&hmac, challenge, chap->hash_len);

	put_unaligned_le32(chap->s1, buf);
	nvme_auth_hmac_update(&hmac, buf, 4);

	put_unaligned_le16(chap->transaction, buf);
	nvme_auth_hmac_update(&hmac, buf, 2);

	*buf = chap->sc_c;
	nvme_auth_hmac_update(&hmac, buf, 1);
	nvme_auth_hmac_update(&hmac, "HostHost", 8);
	nvme_auth_hmac_update(&hmac, ctrl->opts->host->nqn,
			      strlen(ctrl->opts->host->nqn));
	memset(buf, 0, sizeof(buf));
	nvme_auth_hmac_update(&hmac, buf, 1);
	nvme_auth_hmac_update(&hmac, ctrl->opts->subsysnqn,
			      strlen(ctrl->opts->subsysnqn));
	nvme_auth_hmac_final(&hmac, chap->response);
	ret = 0;
out:
	if (challenge != chap->c1)
		kfree(challenge);
	/* Wipe keyed HMAC state */
	memzero_explicit(&hmac, sizeof(hmac));
	return ret;
}

/*
 * Compute the expected controller response into chap->response:
 * HMAC(transformed ctrl key, challenge || s2 || transaction || 0x00 ||
 *      "Controller" || subsysnqn || 0x00 || hostnqn).
 * Used to validate the rval in AUTH_Success1 for bi-directional auth.
 */
static int nvme_auth_dhchap_setup_ctrl_response(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	struct nvme_auth_hmac_ctx hmac;
	struct nvme_dhchap_key *transformed_key;
	u8 buf[4], *challenge = chap->c2;
	int ret;

	transformed_key = nvme_auth_transform_key(ctrl->ctrl_key,
				ctrl->opts->subsysnqn);
	if (IS_ERR(transformed_key)) {
		ret = PTR_ERR(transformed_key);
		return ret;
	}

	ret = nvme_auth_hmac_init(&hmac, chap->hash_id, transformed_key->key,
				  transformed_key->len);
	if (ret) {
		dev_warn(ctrl->device, "qid %d: failed to init hmac, error %d\n",
			 chap->qid, ret);
		goto out;
	}

	if (chap->dh_tfm) {
		challenge = kmalloc(chap->hash_len, GFP_KERNEL);
		if (!challenge) {
			ret = -ENOMEM;
			goto out;
		}
		ret = nvme_auth_augmented_challenge(chap->hash_id,
						    chap->sess_key,
						    chap->sess_key_len,
						    chap->c2, challenge,
						    chap->hash_len);
		if (ret)
			goto out;
	}
	dev_dbg(ctrl->device, "%s: qid %d ctrl response seq %u transaction %d\n",
		__func__, chap->qid, chap->s2, chap->transaction);
	dev_dbg(ctrl->device, "%s: qid %d challenge %*ph\n",
		__func__, chap->qid, (int)chap->hash_len, challenge);
	dev_dbg(ctrl->device, "%s: qid %d subsysnqn %s\n",
		__func__, chap->qid, ctrl->opts->subsysnqn);
	dev_dbg(ctrl->device, "%s: qid %d hostnqn %s\n",
		__func__, chap->qid, ctrl->opts->host->nqn);

	nvme_auth_hmac_update(&hmac, challenge, chap->hash_len);

	put_unaligned_le32(chap->s2, buf);
	nvme_auth_hmac_update(&hmac, buf, 4);

	put_unaligned_le16(chap->transaction, buf);
	nvme_auth_hmac_update(&hmac, buf, 2);

	memset(buf, 0, 4);
	nvme_auth_hmac_update(&hmac, buf, 1);
	nvme_auth_hmac_update(&hmac, "Controller", 10);
	nvme_auth_hmac_update(&hmac, ctrl->opts->subsysnqn,
			      strlen(ctrl->opts->subsysnqn));
	nvme_auth_hmac_update(&hmac, buf, 1);
	nvme_auth_hmac_update(&hmac, ctrl->opts->host->nqn,
			      strlen(ctrl->opts->host->nqn));
	nvme_auth_hmac_final(&hmac, chap->response);
	ret = 0;
out:
	if (challenge != chap->c2)
		kfree(challenge);
	/* Wipe keyed HMAC state */
	memzero_explicit(&hmac, sizeof(hmac));
	nvme_auth_free_key(transformed_key);
	return ret;
}

/*
 * Run the DH exchange: generate our private/public key pair (unless a
 * host key is being reused) and derive the shared session key from the
 * controller's public value.
 */
static int nvme_auth_dhchap_exponential(struct nvme_ctrl *ctrl,
		struct nvme_dhchap_queue_context *chap)
{
	int ret;

	if (chap->host_key && chap->host_key_len) {
		dev_dbg(ctrl->device,
			"qid %d: reusing host key\n", chap->qid);
		goto gen_sesskey;
	}
	ret = nvme_auth_gen_privkey(chap->dh_tfm, chap->dhgroup_id);
	if (ret < 0) {
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return ret;
	}

	chap->host_key_len = crypto_kpp_maxsize(chap->dh_tfm);

	chap->host_key = kzalloc(chap->host_key_len, GFP_KERNEL);
	if (!chap->host_key) {
		chap->host_key_len = 0;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return -ENOMEM;
	}
	ret = nvme_auth_gen_pubkey(chap->dh_tfm,
				   chap->host_key, chap->host_key_len);
	if (ret) {
		dev_dbg(ctrl->device,
			"failed to generate public key, error %d\n", ret);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return ret;
	}

gen_sesskey:
	chap->sess_key_len = chap->host_key_len;
	chap->sess_key = kmalloc(chap->sess_key_len, GFP_KERNEL);
	if (!chap->sess_key) {
		chap->sess_key_len = 0;
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
		return -ENOMEM;
	}

	ret = nvme_auth_gen_shared_secret(chap->dh_tfm,
					  chap->ctrl_key, chap->ctrl_key_len,
					  chap->sess_key, chap->sess_key_len);
	if (ret) {
		dev_dbg(ctrl->device,
			"failed to generate shared secret, error %d\n", ret);
		chap->status = NVME_AUTH_DHCHAP_FAILURE_INCORRECT_PAYLOAD;
		return ret;
	}
	dev_dbg(ctrl->device, "shared secret %*ph\n",
		(int)chap->sess_key_len, chap->sess_key);
	return 0;
}

/*
 * Clear all per-negotiation state and sensitive material; keeps the
 * dh_tfm and the 'authenticated' flag so they can be reused on
 * re-authentication.
 */
static void nvme_auth_reset_dhchap(struct nvme_dhchap_queue_context *chap)
{
	nvme_auth_free_key(chap->transformed_key);
	chap->transformed_key = NULL;
	kfree_sensitive(chap->host_key);
	chap->host_key = NULL;
	chap->host_key_len = 0;
	kfree_sensitive(chap->ctrl_key);
	chap->ctrl_key = NULL;
	chap->ctrl_key_len = 0;
	kfree_sensitive(chap->sess_key);
	chap->sess_key = NULL;
	chap->sess_key_len = 0;
	chap->status = 0;
	chap->error = 0;
	chap->s1 = 0;
	chap->s2 = 0;
	chap->bi_directional = false;
	chap->transaction = 0;
	memset(chap->c1, 0, sizeof(chap->c1));
	memset(chap->c2, 0, sizeof(chap->c2));
	mempool_free(chap->buf, nvme_chap_buf_pool);
	chap->buf = NULL;
}

/* Final teardown of a queue context, including the DH transform */
static void nvme_auth_free_dhchap(struct nvme_dhchap_queue_context *chap)
{
	nvme_auth_reset_dhchap(chap);
	chap->authenticated = false;
	if (chap->dh_tfm)
		crypto_free_kpp(chap->dh_tfm);
}

/* Revoke and drop the generated TLS PSK held in the connect options */
void nvme_auth_revoke_tls_key(struct nvme_ctrl *ctrl)
{
	dev_dbg(ctrl->device, "Wipe generated TLS PSK %08x\n",
		key_serial(ctrl->opts->tls_key));
	key_revoke(ctrl->opts->tls_key);
	key_put(ctrl->opts->tls_key);
	ctrl->opts->tls_key = NULL;
}
EXPORT_SYMBOL_GPL(nvme_auth_revoke_tls_key);

/*
 * Secure channel concatenation (admin queue only): derive a TLS PSK
 * from the negotiated session key and both challenges, insert it into
 * the keyring, and replace any previously generated key.
 */
static int nvme_auth_secure_concat(struct nvme_ctrl *ctrl,
				   struct nvme_dhchap_queue_context *chap)
{
	u8 *psk, *tls_psk;
	char *digest;
	struct key *tls_key;
	size_t psk_len;
	int ret = 0;

	if (!chap->sess_key) {
		dev_warn(ctrl->device,
			 "%s: qid %d no session key negotiated\n",
			 __func__, chap->qid);
		return -ENOKEY;
	}

	if (chap->qid) {
		dev_warn(ctrl->device,
			 "qid %d: secure concatenation not supported on I/O queues\n",
			 chap->qid);
		return -EINVAL;
	}
	ret = nvme_auth_generate_psk(chap->hash_id, chap->sess_key,
				     chap->sess_key_len,
				     chap->c1, chap->c2,
				     chap->hash_len, &psk, &psk_len);
	if (ret) {
		dev_warn(ctrl->device,
			 "%s: qid %d failed to generate PSK, error %d\n",
			 __func__, chap->qid, ret);
		return ret;
	}
	dev_dbg(ctrl->device,
		"%s: generated psk %*ph\n", __func__, (int)psk_len, psk);

	ret = nvme_auth_generate_digest(chap->hash_id, psk, psk_len,
					ctrl->opts->subsysnqn,
					ctrl->opts->host->nqn, &digest);
	if (ret) {
		dev_warn(ctrl->device,
			 "%s: qid %d failed to generate digest, error %d\n",
			 __func__, chap->qid, ret);
		goto out_free_psk;
	}
	dev_dbg(ctrl->device, "%s: generated digest %s\n",
		__func__, digest);
	ret = nvme_auth_derive_tls_psk(chap->hash_id, psk, psk_len,
				       digest, &tls_psk);
	if (ret) {
		dev_warn(ctrl->device,
			 "%s: qid %d failed to derive TLS psk, error %d\n",
			 __func__, chap->qid, ret);
		goto out_free_digest;
	}

	tls_key = nvme_tls_psk_refresh(ctrl->opts->keyring,
				       ctrl->opts->host->nqn,
				       ctrl->opts->subsysnqn, chap->hash_id,
				       tls_psk, psk_len, digest);
	if (IS_ERR(tls_key)) {
		ret = PTR_ERR(tls_key);
		dev_warn(ctrl->device,
			 "%s: qid %d failed to insert generated key, error %d\n",
			 __func__, chap->qid, ret);
		tls_key = NULL;
	}
	kfree_sensitive(tls_psk);
	if (ctrl->opts->tls_key)
		nvme_auth_revoke_tls_key(ctrl);
	ctrl->opts->tls_key = tls_key;
out_free_digest:
	kfree_sensitive(digest);
out_free_psk:
	kfree_sensitive(psk);
	return ret;
}

/*
 * Per-queue authentication work item: runs the full DH-HMAC-CHAP
 * handshake (negotiate, challenge, reply, success1, optional success2)
 * and records the outcome in chap->error. On protocol errors a
 * FAILURE2 message is sent to the controller.
 */
static void nvme_queue_auth_work(struct work_struct *work)
{
	struct nvme_dhchap_queue_context *chap =
		container_of(work, struct nvme_dhchap_queue_context, auth_work);
	struct nvme_ctrl *ctrl = chap->ctrl;
	size_t tl;
	int ret = 0;

	/*
	 * Allocate a large enough buffer for the entire negotiation:
	 * 4k is enough for ffdhe8192.
	 */
	chap->buf = mempool_alloc(nvme_chap_buf_pool, GFP_KERNEL);
	if (!chap->buf) {
		chap->error = -ENOMEM;
		return;
	}

	chap->transaction = ctrl->transaction++;

	/* DH-HMAC-CHAP Step 1: send negotiate */
	dev_dbg(ctrl->device, "%s: qid %d send negotiate\n",
		__func__, chap->qid);
	ret = nvme_auth_set_dhchap_negotiate_data(ctrl, chap);
	if (ret < 0) {
		chap->error = ret;
		return;
	}
	tl = ret;
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
	if (ret) {
		chap->error = ret;
		return;
	}

	/* DH-HMAC-CHAP Step 2: receive challenge */
	dev_dbg(ctrl->device, "%s: qid %d receive challenge\n",
		__func__, chap->qid);

	memset(chap->buf, 0, CHAP_BUF_SIZE);
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, CHAP_BUF_SIZE,
			       false);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid %d failed to receive challenge, %s %d\n",
			 chap->qid, ret < 0 ? "error" : "nvme status", ret);
		chap->error = ret;
		return;
	}
	ret = nvme_auth_receive_validate(ctrl, chap->qid, chap->buf, chap->transaction,
					 NVME_AUTH_DHCHAP_MESSAGE_CHALLENGE);
	if (ret) {
		chap->status = ret;
		chap->error = -EKEYREJECTED;
		return;
	}

	ret = nvme_auth_process_dhchap_challenge(ctrl, chap);
	if (ret) {
		/* Invalid challenge parameters */
		chap->error = ret;
		goto fail2;
	}

	if (chap->ctrl_key_len) {
		dev_dbg(ctrl->device,
			"%s: qid %d DH exponential\n",
			__func__, chap->qid);
		ret = nvme_auth_dhchap_exponential(ctrl, chap);
		if (ret) {
			chap->error = ret;
			goto fail2;
		}
	}

	dev_dbg(ctrl->device, "%s: qid %d host response\n",
		__func__, chap->qid);
	mutex_lock(&ctrl->dhchap_auth_mutex);
	ret = nvme_auth_dhchap_setup_host_response(ctrl, chap);
	mutex_unlock(&ctrl->dhchap_auth_mutex);
	if (ret) {
		chap->error = ret;
		goto fail2;
	}

	/* DH-HMAC-CHAP Step 3: send reply */
	dev_dbg(ctrl->device, "%s: qid %d send reply\n",
		__func__, chap->qid);
	ret = nvme_auth_set_dhchap_reply_data(ctrl, chap);
	if (ret < 0) {
		chap->error = ret;
		goto fail2;
	}

	tl = ret;
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
	if (ret) {
		chap->error = ret;
		goto fail2;
	}

	/* DH-HMAC-CHAP Step 4: receive success1 */
	dev_dbg(ctrl->device, "%s: qid %d receive success1\n",
		__func__, chap->qid);

	memset(chap->buf, 0, CHAP_BUF_SIZE);
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, CHAP_BUF_SIZE,
			       false);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid %d failed to receive success1, %s %d\n",
			 chap->qid, ret < 0 ? "error" : "nvme status", ret);
		chap->error = ret;
		return;
	}
	ret = nvme_auth_receive_validate(ctrl, chap->qid,
					 chap->buf, chap->transaction,
					 NVME_AUTH_DHCHAP_MESSAGE_SUCCESS1);
	if (ret) {
		chap->status = ret;
		chap->error = -EKEYREJECTED;
		return;
	}

	mutex_lock(&ctrl->dhchap_auth_mutex);
	if (ctrl->ctrl_key) {
		dev_dbg(ctrl->device,
			"%s: qid %d controller response\n",
			__func__, chap->qid);
		ret = nvme_auth_dhchap_setup_ctrl_response(ctrl, chap);
		if (ret) {
			mutex_unlock(&ctrl->dhchap_auth_mutex);
			chap->error = ret;
			goto fail2;
		}
	}
	mutex_unlock(&ctrl->dhchap_auth_mutex);

	ret = nvme_auth_process_dhchap_success1(ctrl, chap);
	if (ret) {
		/* Controller authentication failed */
		chap->error = -EKEYREJECTED;
		goto fail2;
	}

	if (chap->bi_directional) {
		/* DH-HMAC-CHAP Step 5: send success2 */
		dev_dbg(ctrl->device, "%s: qid %d send success2\n",
			__func__, chap->qid);
		tl = nvme_auth_set_dhchap_success2_data(ctrl, chap);
		ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
		if (ret)
			chap->error = ret;
	}
	if (!ret) {
		chap->error = 0;
		chap->authenticated = true;
		if (ctrl->opts->concat &&
		    (ret = nvme_auth_secure_concat(ctrl, chap))) {
			dev_warn(ctrl->device,
				 "%s: qid %d failed to enable secure concatenation\n",
				 __func__, chap->qid);
			chap->error = ret;
			chap->authenticated = false;
		}
		return;
	}

fail2:
	if (chap->status == 0)
		chap->status = NVME_AUTH_DHCHAP_FAILURE_FAILED;
	dev_dbg(ctrl->device, "%s: qid %d send failure2, status %x\n",
		__func__, chap->qid, chap->status);
	tl = nvme_auth_set_dhchap_failure2_data(ctrl, chap);
	ret = nvme_auth_submit(ctrl, chap->qid, chap->buf, tl, true);
	/*
	 * only update error if send failure2 failed and no other
	 * error had been set during authentication.
	 */
	if (ret && !chap->error)
		chap->error = ret;
}

/*
 * Kick off authentication on @qid. Any in-flight handshake for the
 * queue is cancelled first; the result is collected via
 * nvme_auth_wait().
 */
int nvme_auth_negotiate(struct nvme_ctrl *ctrl, int qid)
{
	struct nvme_dhchap_queue_context *chap;

	if (!ctrl->host_key) {
		dev_warn(ctrl->device, "qid %d: no key\n", qid);
		return -ENOKEY;
	}

	if (ctrl->opts->dhchap_ctrl_secret && !ctrl->ctrl_key) {
		dev_warn(ctrl->device, "qid %d: invalid ctrl key\n", qid);
		return -ENOKEY;
	}

	chap = &ctrl->dhchap_ctxs[qid];
	cancel_work_sync(&chap->auth_work);
	queue_work(nvme_auth_wq, &chap->auth_work);
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_auth_negotiate);

/*
 * Wait for the handshake on @qid to finish, return its result and
 * scrub the sensitive per-negotiation state.
 */
int nvme_auth_wait(struct nvme_ctrl *ctrl, int qid)
{
	struct nvme_dhchap_queue_context *chap;
	int ret;

	chap = &ctrl->dhchap_ctxs[qid];
	flush_work(&chap->auth_work);
	ret = chap->error;
	/* clear sensitive info */
	nvme_auth_reset_dhchap(chap);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_auth_wait);

/*
 * Controller-level re-authentication work: authenticate the admin
 * queue first, then re-run authentication on every I/O queue that had
 * previously been authenticated.
 */
static void nvme_ctrl_auth_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, dhchap_auth_work);
	int ret, q;

	/*
	 * If the ctrl is not connected, bail as reconnect will handle
	 * authentication.
	 */
	if (nvme_ctrl_state(ctrl) != NVME_CTRL_LIVE)
		return;

	/* Authenticate admin queue first */
	ret = nvme_auth_negotiate(ctrl, 0);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid 0: error %d setting up authentication\n", ret);
		return;
	}
	ret = nvme_auth_wait(ctrl, 0);
	if (ret) {
		dev_warn(ctrl->device,
			 "qid 0: authentication failed\n");
		return;
	}
	/*
	 * Only run authentication on the admin queue for secure concatenation.
	 */
	if (ctrl->opts->concat)
		return;

	for (q = 1; q < ctrl->queue_count; q++) {
		struct nvme_dhchap_queue_context *chap =
			&ctrl->dhchap_ctxs[q];
		/*
		 * Skip re-authentication if the queue had
		 * not been authenticated initially.
		 */
		if (!chap->authenticated)
			continue;
		cancel_work_sync(&chap->auth_work);
		queue_work(nvme_auth_wq, &chap->auth_work);
	}

	/*
	 * Failure is a soft-state; credentials remain valid until
	 * the controller terminates the connection.
	 */
	for (q = 1; q < ctrl->queue_count; q++) {
		struct nvme_dhchap_queue_context *chap =
			&ctrl->dhchap_ctxs[q];
		if (!chap->authenticated)
			continue;
		flush_work(&chap->auth_work);
		ret = chap->error;
		nvme_auth_reset_dhchap(chap);
		if (ret)
			dev_warn(ctrl->device,
				 "qid %d: authentication failed\n", q);
	}
}

/*
 * Parse the configured DH-HMAC-CHAP secrets and allocate one queue
 * context per possible queue. Returns 0 when no secrets are configured.
 */
int nvme_auth_init_ctrl(struct nvme_ctrl *ctrl)
{
	struct nvme_dhchap_queue_context *chap;
	int i, ret;

	mutex_init(&ctrl->dhchap_auth_mutex);
	INIT_WORK(&ctrl->dhchap_auth_work, nvme_ctrl_auth_work);
	if (!ctrl->opts)
		return 0;
	ret = nvme_auth_parse_key(ctrl->opts->dhchap_secret, &ctrl->host_key);
	if (ret)
		return ret;
	ret = nvme_auth_parse_key(ctrl->opts->dhchap_ctrl_secret,
			&ctrl->ctrl_key);
	if (ret)
		goto err_free_dhchap_secret;

	if (!ctrl->opts->dhchap_secret && !ctrl->opts->dhchap_ctrl_secret)
		return 0;

	ctrl->dhchap_ctxs = kvzalloc_objs(*chap, ctrl_max_dhchaps(ctrl));
	if (!ctrl->dhchap_ctxs) {
		ret = -ENOMEM;
		goto err_free_dhchap_ctrl_secret;
	}

	for (i = 0; i < ctrl_max_dhchaps(ctrl); i++) {
		chap = &ctrl->dhchap_ctxs[i];
		chap->qid = i;
		chap->ctrl = ctrl;
		chap->authenticated = false;
		INIT_WORK(&chap->auth_work, nvme_queue_auth_work);
	}

	return 0;
err_free_dhchap_ctrl_secret:
	nvme_auth_free_key(ctrl->ctrl_key);
	ctrl->ctrl_key = NULL;
err_free_dhchap_secret:
	nvme_auth_free_key(ctrl->host_key);
	ctrl->host_key = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_auth_init_ctrl);

/* Stop any pending controller-level re-authentication */
void nvme_auth_stop(struct nvme_ctrl *ctrl)
{
	cancel_work_sync(&ctrl->dhchap_auth_work);
}
EXPORT_SYMBOL_GPL(nvme_auth_stop);

/* Release all queue contexts and the parsed host/controller keys */
void nvme_auth_free(struct nvme_ctrl *ctrl)
{
	int i;

	if (ctrl->dhchap_ctxs) {
		for (i = 0; i < ctrl_max_dhchaps(ctrl); i++)
			nvme_auth_free_dhchap(&ctrl->dhchap_ctxs[i]);
		kvfree(ctrl->dhchap_ctxs);
	}
	if (ctrl->host_key) {
		nvme_auth_free_key(ctrl->host_key);
		ctrl->host_key = NULL;
	}
	if (ctrl->ctrl_key) {
		nvme_auth_free_key(ctrl->ctrl_key);
		ctrl->ctrl_key = NULL;
	}
}
EXPORT_SYMBOL_GPL(nvme_auth_free);

/* Module init: workqueue, slab cache and mempool for CHAP buffers */
int __init nvme_init_auth(void)
{
	nvme_auth_wq = alloc_workqueue("nvme-auth-wq",
			       WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_auth_wq)
		return -ENOMEM;

	nvme_chap_buf_cache = kmem_cache_create("nvme-chap-buf-cache",
				CHAP_BUF_SIZE, 0, SLAB_HWCACHE_ALIGN, NULL);
	if (!nvme_chap_buf_cache)
		goto err_destroy_workqueue;

	nvme_chap_buf_pool = mempool_create(16, mempool_alloc_slab,
			mempool_free_slab, nvme_chap_buf_cache);
	if (!nvme_chap_buf_pool)
		goto err_destroy_chap_buf_cache;

	return 0;
err_destroy_chap_buf_cache:
	kmem_cache_destroy(nvme_chap_buf_cache);
err_destroy_workqueue:
	destroy_workqueue(nvme_auth_wq);
	return -ENOMEM;
}

/* Module exit: tear down in reverse order of nvme_init_auth() */
void __exit nvme_exit_auth(void)
{
	mempool_destroy(nvme_chap_buf_pool);
	kmem_cache_destroy(nvme_chap_buf_cache);
	destroy_workqueue(nvme_auth_wq);
}