// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2020 Hannes Reinecke, SUSE Linux
 */

#include <linux/module.h>
#include <linux/crc32.h>
#include <linux/base64.h>
#include <linux/prandom.h>
#include <linux/scatterlist.h>
#include <linux/unaligned.h>
#include <crypto/hash.h>
#include <crypto/dh.h>
#include <crypto/hkdf.h>
#include <linux/nvme.h>
#include <linux/nvme-auth.h>

#define HKDF_MAX_HASHLEN 64

static u32 nvme_dhchap_seqnum;
static DEFINE_MUTEX(nvme_dhchap_mutex);

u32 nvme_auth_get_seqnum(void)
{
	u32 seqnum;

	mutex_lock(&nvme_dhchap_mutex);
	if (!nvme_dhchap_seqnum)
		nvme_dhchap_seqnum = get_random_u32();
	else {
		nvme_dhchap_seqnum++;
		if (!nvme_dhchap_seqnum)
			nvme_dhchap_seqnum++;
	}
	seqnum = nvme_dhchap_seqnum;
	mutex_unlock(&nvme_dhchap_mutex);
	return seqnum;
}
EXPORT_SYMBOL_GPL(nvme_auth_get_seqnum);

static struct nvme_auth_dhgroup_map {
	const char name[16];
	const char kpp[16];
} dhgroup_map[] = {
	[NVME_AUTH_DHGROUP_NULL] = {
		.name = "null", .kpp = "null" },
	[NVME_AUTH_DHGROUP_2048] = {
		.name = "ffdhe2048", .kpp = "ffdhe2048(dh)" },
	[NVME_AUTH_DHGROUP_3072] = {
		.name = "ffdhe3072", .kpp = "ffdhe3072(dh)" },
	[NVME_AUTH_DHGROUP_4096] = {
		.name = "ffdhe4096", .kpp = "ffdhe4096(dh)" },
	[NVME_AUTH_DHGROUP_6144] = {
		.name = "ffdhe6144", .kpp = "ffdhe6144(dh)" },
	[NVME_AUTH_DHGROUP_8192] = {
		.name = "ffdhe8192", .kpp = "ffdhe8192(dh)" },
};

const char *nvme_auth_dhgroup_name(u8 dhgroup_id)
{
	if (dhgroup_id >= ARRAY_SIZE(dhgroup_map))
		return NULL;
	return dhgroup_map[dhgroup_id].name;
}
EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_name);

const char *nvme_auth_dhgroup_kpp(u8 dhgroup_id)
{
	if (dhgroup_id >= ARRAY_SIZE(dhgroup_map))
		return NULL;
	return dhgroup_map[dhgroup_id].kpp;
}
EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_kpp);

u8 nvme_auth_dhgroup_id(const char *dhgroup_name)
{
	int i;

	if (!dhgroup_name || !strlen(dhgroup_name))
		return NVME_AUTH_DHGROUP_INVALID;
	for (i = 0; i < ARRAY_SIZE(dhgroup_map); i++) {
		if (!strlen(dhgroup_map[i].name))
			continue;
		if (!strncmp(dhgroup_map[i].name, dhgroup_name,
			     strlen(dhgroup_map[i].name)))
			return i;
	}
	return NVME_AUTH_DHGROUP_INVALID;
}
EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_id);

static struct nvme_dhchap_hash_map {
	int len;
	const char hmac[15];
	const char digest[8];
} hash_map[] = {
	[NVME_AUTH_HASH_SHA256] = {
		.len = 32,
		.hmac = "hmac(sha256)",
		.digest = "sha256",
	},
	[NVME_AUTH_HASH_SHA384] = {
		.len = 48,
		.hmac = "hmac(sha384)",
		.digest = "sha384",
	},
	[NVME_AUTH_HASH_SHA512] = {
		.len = 64,
		.hmac = "hmac(sha512)",
		.digest = "sha512",
	},
};

const char *nvme_auth_hmac_name(u8 hmac_id)
{
	if (hmac_id >= ARRAY_SIZE(hash_map))
		return NULL;
	return hash_map[hmac_id].hmac;
}
EXPORT_SYMBOL_GPL(nvme_auth_hmac_name);

const char *nvme_auth_digest_name(u8 hmac_id)
{
	if (hmac_id >= ARRAY_SIZE(hash_map))
		return NULL;
	return hash_map[hmac_id].digest;
}
EXPORT_SYMBOL_GPL(nvme_auth_digest_name);

u8 nvme_auth_hmac_id(const char *hmac_name)
{
	int i;

	if (!hmac_name || !strlen(hmac_name))
		return NVME_AUTH_HASH_INVALID;

	for (i = 0; i < ARRAY_SIZE(hash_map); i++) {
		if (!strlen(hash_map[i].hmac))
			continue;
		if (!strncmp(hash_map[i].hmac, hmac_name,
			     strlen(hash_map[i].hmac)))
			return i;
	}
	return NVME_AUTH_HASH_INVALID;
}
EXPORT_SYMBOL_GPL(nvme_auth_hmac_id);
size_t nvme_auth_hmac_hash_len(u8 hmac_id)
{
	if (hmac_id >= ARRAY_SIZE(hash_map))
		return 0;
	return hash_map[hmac_id].len;
}
EXPORT_SYMBOL_GPL(nvme_auth_hmac_hash_len);

u32 nvme_auth_key_struct_size(u32 key_len)
{
	struct nvme_dhchap_key key;

	return struct_size(&key, key, key_len);
}
EXPORT_SYMBOL_GPL(nvme_auth_key_struct_size);

struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
					      u8 key_hash)
{
	struct nvme_dhchap_key *key;
	unsigned char *p;
	u32 crc;
	int ret, key_len;
	size_t allocated_len = strlen(secret);

	/* Secret might be affixed with a ':' */
	p = strrchr(secret, ':');
	if (p)
		allocated_len = p - secret;
	key = nvme_auth_alloc_key(allocated_len, 0);
	if (!key)
		return ERR_PTR(-ENOMEM);

	key_len = base64_decode(secret, allocated_len, key->key);
	if (key_len < 0) {
		pr_debug("base64 key decoding error %d\n",
			 key_len);
		ret = key_len;
		goto out_free_secret;
	}

	if (key_len != 36 && key_len != 52 &&
	    key_len != 68) {
		pr_err("Invalid key len %d\n", key_len);
		ret = -EINVAL;
		goto out_free_secret;
	}

	/* The last four bytes are the CRC in little-endian format */
	key_len -= 4;
	/*
	 * The linux implementation doesn't do pre- and post-increments,
	 * so we have to do it manually.
	 */
	crc = ~crc32(~0, key->key, key_len);

	if (get_unaligned_le32(key->key + key_len) != crc) {
		pr_err("key crc mismatch (key %08x, crc %08x)\n",
		       get_unaligned_le32(key->key + key_len), crc);
		ret = -EKEYREJECTED;
		goto out_free_secret;
	}
	key->len = key_len;
	key->hash = key_hash;
	return key;
out_free_secret:
	nvme_auth_free_key(key);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(nvme_auth_extract_key);
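
/*
 * Illustrative sketch only, not part of the upstream module: it shows the
 * secret layout nvme_auth_extract_key() expects, i.e. the base64 encoding of
 * <key bytes> || CRC-32(key bytes) in little-endian, optionally followed by a
 * trailing ':'. The nvme_auth_example_format_secret() name, the fixed 32-byte
 * key size and the "00" (non-transformed) hash id are assumptions made for
 * this example.
 */
static int __maybe_unused nvme_auth_example_format_secret(char *buf,
							   size_t buf_len)
{
	u8 raw[36];	/* 32 key bytes + 4 CRC bytes -> 48 base64 chars */
	u32 crc;
	int enc_len;

	/* 10 bytes prefix + 48 base64 chars + ':' + NUL terminator */
	if (buf_len < 10 + 48 + 2)
		return -EINVAL;

	get_random_bytes(raw, 32);
	/* Same CRC convention as nvme_auth_extract_key() */
	crc = ~crc32(~0, raw, 32);
	put_unaligned_le32(crc, raw + 32);

	memcpy(buf, "DHHC-1:00:", 10);
	enc_len = base64_encode(raw, sizeof(raw), buf + 10);
	buf[10 + enc_len] = ':';
	buf[10 + enc_len + 1] = '\0';
	return 0;
}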
struct nvme_dhchap_key *nvme_auth_alloc_key(u32 len, u8 hash)
{
	u32 num_bytes = nvme_auth_key_struct_size(len);
	struct nvme_dhchap_key *key = kzalloc(num_bytes, GFP_KERNEL);

	if (key) {
		key->len = len;
		key->hash = hash;
	}
	return key;
}
EXPORT_SYMBOL_GPL(nvme_auth_alloc_key);

void nvme_auth_free_key(struct nvme_dhchap_key *key)
{
	if (!key)
		return;
	kfree_sensitive(key);
}
EXPORT_SYMBOL_GPL(nvme_auth_free_key);

struct nvme_dhchap_key *nvme_auth_transform_key(
		struct nvme_dhchap_key *key, char *nqn)
{
	const char *hmac_name;
	struct crypto_shash *key_tfm;
	struct shash_desc *shash;
	struct nvme_dhchap_key *transformed_key;
	int ret, key_len;

	if (!key) {
		pr_warn("No key specified\n");
		return ERR_PTR(-ENOKEY);
	}
	if (key->hash == 0) {
		key_len = nvme_auth_key_struct_size(key->len);
		transformed_key = kmemdup(key, key_len, GFP_KERNEL);
		if (!transformed_key)
			return ERR_PTR(-ENOMEM);
		return transformed_key;
	}
	hmac_name = nvme_auth_hmac_name(key->hash);
	if (!hmac_name) {
		pr_warn("Invalid key hash id %d\n", key->hash);
		return ERR_PTR(-EINVAL);
	}

	key_tfm = crypto_alloc_shash(hmac_name, 0, 0);
	if (IS_ERR(key_tfm))
		return ERR_CAST(key_tfm);

	shash = kmalloc(sizeof(struct shash_desc) +
			crypto_shash_descsize(key_tfm),
			GFP_KERNEL);
	if (!shash) {
		ret = -ENOMEM;
		goto out_free_key;
	}

	key_len = crypto_shash_digestsize(key_tfm);
	transformed_key = nvme_auth_alloc_key(key_len, key->hash);
	if (!transformed_key) {
		ret = -ENOMEM;
		goto out_free_shash;
	}

	shash->tfm = key_tfm;
	ret = crypto_shash_setkey(key_tfm, key->key, key->len);
	if (ret < 0)
		goto out_free_transformed_key;
	ret = crypto_shash_init(shash);
	if (ret < 0)
		goto out_free_transformed_key;
	ret = crypto_shash_update(shash, nqn, strlen(nqn));
	if (ret < 0)
		goto out_free_transformed_key;
	ret = crypto_shash_update(shash, "NVMe-over-Fabrics", 17);
	if (ret < 0)
		goto out_free_transformed_key;
	ret = crypto_shash_final(shash, transformed_key->key);
	if (ret < 0)
		goto out_free_transformed_key;

	kfree(shash);
	crypto_free_shash(key_tfm);

	return transformed_key;

out_free_transformed_key:
	nvme_auth_free_key(transformed_key);
out_free_shash:
	kfree(shash);
out_free_key:
	crypto_free_shash(key_tfm);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(nvme_auth_transform_key);
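
/*
 * Illustrative sketch only, not part of the upstream module: it parses a
 * configured "DHHC-1:XX:..." secret with nvme_auth_generate_key() (declared
 * in <linux/nvme-auth.h>, defined below) and derives the transformed key,
 * i.e. HMAC(secret, NQN || "NVMe-over-Fabrics"), via
 * nvme_auth_transform_key(). The nvme_auth_example_transform() name is an
 * assumption made for this example.
 */
static int __maybe_unused nvme_auth_example_transform(u8 *secret, char *nqn)
{
	struct nvme_dhchap_key *key, *tkey;
	int ret;

	ret = nvme_auth_generate_key(secret, &key);
	if (ret)
		return ret;

	tkey = nvme_auth_transform_key(key, nqn);
	if (IS_ERR(tkey)) {
		ret = PTR_ERR(tkey);
		goto out_free_key;
	}

	/* ... use tkey->key / tkey->len for the DH-HMAC-CHAP response ... */
	nvme_auth_free_key(tkey);
out_free_key:
	nvme_auth_free_key(key);
	return ret;
}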
static int nvme_auth_hash_skey(int hmac_id, u8 *skey, size_t skey_len, u8 *hkey)
{
	const char *digest_name;
	struct crypto_shash *tfm;
	int ret;

	digest_name = nvme_auth_digest_name(hmac_id);
	if (!digest_name) {
		pr_debug("%s: failed to get digest for %d\n", __func__,
			 hmac_id);
		return -EINVAL;
	}
	tfm = crypto_alloc_shash(digest_name, 0, 0);
	if (IS_ERR(tfm))
		return -ENOMEM;

	ret = crypto_shash_tfm_digest(tfm, skey, skey_len, hkey);
	if (ret < 0)
		pr_debug("%s: Failed to hash digest len %zu\n", __func__,
			 skey_len);

	crypto_free_shash(tfm);
	return ret;
}

int nvme_auth_augmented_challenge(u8 hmac_id, u8 *skey, size_t skey_len,
				  u8 *challenge, u8 *aug, size_t hlen)
{
	struct crypto_shash *tfm;
	u8 *hashed_key;
	const char *hmac_name;
	int ret;

	hashed_key = kmalloc(hlen, GFP_KERNEL);
	if (!hashed_key)
		return -ENOMEM;

	ret = nvme_auth_hash_skey(hmac_id, skey,
				  skey_len, hashed_key);
	if (ret < 0)
		goto out_free_key;

	hmac_name = nvme_auth_hmac_name(hmac_id);
	if (!hmac_name) {
		pr_warn("%s: invalid hash algorithm %d\n",
			__func__, hmac_id);
		ret = -EINVAL;
		goto out_free_key;
	}

	tfm = crypto_alloc_shash(hmac_name, 0, 0);
	if (IS_ERR(tfm)) {
		ret = PTR_ERR(tfm);
		goto out_free_key;
	}

	ret = crypto_shash_setkey(tfm, hashed_key, hlen);
	if (ret)
		goto out_free_hash;

	ret = crypto_shash_tfm_digest(tfm, challenge, hlen, aug);
out_free_hash:
	crypto_free_shash(tfm);
out_free_key:
	kfree_sensitive(hashed_key);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_auth_augmented_challenge);

int nvme_auth_gen_privkey(struct crypto_kpp *dh_tfm, u8 dh_gid)
{
	int ret;

	ret = crypto_kpp_set_secret(dh_tfm, NULL, 0);
	if (ret)
		pr_debug("failed to set private key, error %d\n", ret);

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_auth_gen_privkey);

int nvme_auth_gen_pubkey(struct crypto_kpp *dh_tfm,
			 u8 *host_key, size_t host_key_len)
{
	struct kpp_request *req;
	struct crypto_wait wait;
	struct scatterlist dst;
	int ret;

	req = kpp_request_alloc(dh_tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	crypto_init_wait(&wait);
	kpp_request_set_input(req, NULL, 0);
	sg_init_one(&dst, host_key, host_key_len);
	kpp_request_set_output(req, &dst, host_key_len);
	kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				 crypto_req_done, &wait);

	ret = crypto_wait_req(crypto_kpp_generate_public_key(req), &wait);
	kpp_request_free(req);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_auth_gen_pubkey);

int nvme_auth_gen_shared_secret(struct crypto_kpp *dh_tfm,
				u8 *ctrl_key, size_t ctrl_key_len,
				u8 *sess_key, size_t sess_key_len)
{
	struct kpp_request *req;
	struct crypto_wait wait;
	struct scatterlist src, dst;
	int ret;

	req = kpp_request_alloc(dh_tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	crypto_init_wait(&wait);
	sg_init_one(&src, ctrl_key, ctrl_key_len);
	kpp_request_set_input(req, &src, ctrl_key_len);
	sg_init_one(&dst, sess_key, sess_key_len);
	kpp_request_set_output(req, &dst, sess_key_len);
	kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				 crypto_req_done, &wait);

	ret = crypto_wait_req(crypto_kpp_compute_shared_secret(req), &wait);

	kpp_request_free(req);
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_auth_gen_shared_secret);

int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key)
{
	struct nvme_dhchap_key *key;
	u8 key_hash;

	if (!secret) {
		*ret_key = NULL;
		return 0;
	}

	if (sscanf(secret, "DHHC-1:%hhd:%*s:", &key_hash) != 1)
		return -EINVAL;

	/* Pass in the secret without the 'DHHC-1:XX:' prefix */
	key = nvme_auth_extract_key(secret + 10, key_hash);
	if (IS_ERR(key)) {
		*ret_key = NULL;
		return PTR_ERR(key);
	}

	*ret_key = key;
	return 0;
}
EXPORT_SYMBOL_GPL(nvme_auth_generate_key);
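
/*
 * Illustrative sketch only, not part of the upstream module: the usual
 * calling sequence for the FFDHE helpers above. A fresh ephemeral private
 * key is generated, the matching public key is exported for the
 * DH-HMAC-CHAP_Reply, and the peer's public value is folded into the shared
 * session key. The nvme_auth_example_dh_exchange() name and its parameter
 * handling are assumptions made for this example.
 */
static int __maybe_unused nvme_auth_example_dh_exchange(u8 dh_gid,
		u8 *peer_key, size_t peer_key_len,
		u8 *sess_key, size_t sess_key_len)
{
	const char *kpp_name = nvme_auth_dhgroup_kpp(dh_gid);
	struct crypto_kpp *dh_tfm;
	u8 *pub_key;
	size_t pub_key_len;
	int ret;

	if (!kpp_name)
		return -EINVAL;

	dh_tfm = crypto_alloc_kpp(kpp_name, 0, 0);
	if (IS_ERR(dh_tfm))
		return PTR_ERR(dh_tfm);

	ret = nvme_auth_gen_privkey(dh_tfm, dh_gid);
	if (ret)
		goto out_free_tfm;

	pub_key_len = crypto_kpp_maxsize(dh_tfm);
	pub_key = kzalloc(pub_key_len, GFP_KERNEL);
	if (!pub_key) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	ret = nvme_auth_gen_pubkey(dh_tfm, pub_key, pub_key_len);
	if (ret)
		goto out_free_pub;

	/* ... send pub_key to the peer, receive peer_key ... */

	ret = nvme_auth_gen_shared_secret(dh_tfm, peer_key, peer_key_len,
					  sess_key, sess_key_len);
out_free_pub:
	kfree(pub_key);
out_free_tfm:
	crypto_free_kpp(dh_tfm);
	return ret;
}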
/**
 * nvme_auth_generate_psk - Generate a PSK for TLS
 * @hmac_id: Hash function identifier
 * @skey: Session key
 * @skey_len: Length of @skey
 * @c1: Value of challenge C1
 * @c2: Value of challenge C2
 * @hash_len: Hash length of the hash algorithm
 * @ret_psk: Pointer to the resulting generated PSK
 * @ret_len: Length of @ret_psk
 *
 * Generate a PSK for TLS as specified in NVMe base specification, section
 * 8.13.5.9: Generated PSK for TLS
 *
 * The generated PSK for TLS shall be computed applying the HMAC function
 * using the hash function H( ) selected by the HashID parameter in the
 * DH-HMAC-CHAP_Challenge message with the session key KS as key to the
 * concatenation of the two challenges C1 and C2 (i.e., generated
 * PSK = HMAC(KS, C1 || C2)).
 *
 * Returns 0 on success with a valid generated PSK pointer in @ret_psk and
 * the length of @ret_psk in @ret_len, or a negative error number otherwise.
 */
int nvme_auth_generate_psk(u8 hmac_id, u8 *skey, size_t skey_len,
		u8 *c1, u8 *c2, size_t hash_len, u8 **ret_psk, size_t *ret_len)
{
	struct crypto_shash *tfm;
	SHASH_DESC_ON_STACK(shash, tfm);
	u8 *psk;
	const char *hmac_name;
	int ret, psk_len;

	if (!c1 || !c2)
		return -EINVAL;

	hmac_name = nvme_auth_hmac_name(hmac_id);
	if (!hmac_name) {
		pr_warn("%s: invalid hash algorithm %d\n",
			__func__, hmac_id);
		return -EINVAL;
	}

	tfm = crypto_alloc_shash(hmac_name, 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	psk_len = crypto_shash_digestsize(tfm);
	psk = kzalloc(psk_len, GFP_KERNEL);
	if (!psk) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	shash->tfm = tfm;
	ret = crypto_shash_setkey(tfm, skey, skey_len);
	if (ret)
		goto out_free_psk;

	ret = crypto_shash_init(shash);
	if (ret)
		goto out_free_psk;

	ret = crypto_shash_update(shash, c1, hash_len);
	if (ret)
		goto out_free_psk;

	ret = crypto_shash_update(shash, c2, hash_len);
	if (ret)
		goto out_free_psk;

	ret = crypto_shash_final(shash, psk);
	if (!ret) {
		*ret_psk = psk;
		*ret_len = psk_len;
	}

out_free_psk:
	if (ret)
		kfree_sensitive(psk);
out_free_tfm:
	crypto_free_shash(tfm);

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_auth_generate_psk);
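
/*
 * Illustrative sketch only, not part of the upstream module: it computes the
 * generated PSK = HMAC(KS, C1 || C2) from a finished DH-HMAC-CHAP exchange.
 * The nvme_auth_example_psk() name is an assumption made for this example;
 * both challenges must be hash_len bytes long, matching the digest size of
 * the selected hash (see nvme_auth_hmac_hash_len()).
 */
static int __maybe_unused nvme_auth_example_psk(u8 hmac_id,
		u8 *sess_key, size_t sess_key_len, u8 *c1, u8 *c2)
{
	size_t hash_len = nvme_auth_hmac_hash_len(hmac_id);
	u8 *psk;
	size_t psk_len;
	int ret;

	if (!hash_len)
		return -EINVAL;

	ret = nvme_auth_generate_psk(hmac_id, sess_key, sess_key_len,
				     c1, c2, hash_len, &psk, &psk_len);
	if (ret)
		return ret;

	/* ... hand psk/psk_len to nvme_auth_generate_digest() ... */
	kfree_sensitive(psk);
	return 0;
}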
/**
 * nvme_auth_generate_digest - Generate TLS PSK digest
 * @hmac_id: Hash function identifier
 * @psk: Generated input PSK
 * @psk_len: Length of @psk
 * @subsysnqn: NQN of the subsystem
 * @hostnqn: NQN of the host
 * @ret_digest: Pointer to the returned digest
 *
 * Generate a TLS PSK digest as specified in TP8018 Section 3.6.1.3:
 * TLS PSK and PSK identity Derivation
 *
 * The PSK digest shall be computed by encoding in Base64 (refer to RFC
 * 4648) the result of the application of the HMAC function using the hash
 * function specified in item 4 above (i.e., the hash function of the cipher
 * suite associated with the PSK identity) with the PSK as HMAC key to the
 * concatenation of:
 * - the NQN of the host (i.e., NQNh) not including the null terminator;
 * - a space character;
 * - the NQN of the NVM subsystem (i.e., NQNc) not including the null
 *   terminator;
 * - a space character; and
 * - the seventeen ASCII characters "NVMe-over-Fabrics"
 * (i.e., <PSK digest> = Base64(HMAC(PSK, NQNh || " " || NQNc || " " ||
 * "NVMe-over-Fabrics"))).
 * The length of the PSK digest depends on the hash function used to compute
 * it as follows:
 * - If the SHA-256 hash function is used, the resulting PSK digest is 44
 *   characters long; or
 * - If the SHA-384 hash function is used, the resulting PSK digest is 64
 *   characters long.
 *
 * Returns 0 on success with a valid digest pointer in @ret_digest, or a
 * negative error number on failure.
 */
int nvme_auth_generate_digest(u8 hmac_id, u8 *psk, size_t psk_len,
		char *subsysnqn, char *hostnqn, u8 **ret_digest)
{
	struct crypto_shash *tfm;
	SHASH_DESC_ON_STACK(shash, tfm);
	u8 *digest, *enc;
	const char *hmac_name;
	size_t digest_len, hmac_len;
	int ret;

	if (WARN_ON(!subsysnqn || !hostnqn))
		return -EINVAL;

	hmac_name = nvme_auth_hmac_name(hmac_id);
	if (!hmac_name) {
		pr_warn("%s: invalid hash algorithm %d\n",
			__func__, hmac_id);
		return -EINVAL;
	}

	switch (nvme_auth_hmac_hash_len(hmac_id)) {
	case 32:
		hmac_len = 44;
		break;
	case 48:
		hmac_len = 64;
		break;
	default:
		pr_warn("%s: invalid hash algorithm '%s'\n",
			__func__, hmac_name);
		return -EINVAL;
	}

	enc = kzalloc(hmac_len + 1, GFP_KERNEL);
	if (!enc)
		return -ENOMEM;

	tfm = crypto_alloc_shash(hmac_name, 0, 0);
	if (IS_ERR(tfm)) {
		ret = PTR_ERR(tfm);
		goto out_free_enc;
	}

	digest_len = crypto_shash_digestsize(tfm);
	digest = kzalloc(digest_len, GFP_KERNEL);
	if (!digest) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	shash->tfm = tfm;
	ret = crypto_shash_setkey(tfm, psk, psk_len);
	if (ret)
		goto out_free_digest;

	ret = crypto_shash_init(shash);
	if (ret)
		goto out_free_digest;

	ret = crypto_shash_update(shash, hostnqn, strlen(hostnqn));
	if (ret)
		goto out_free_digest;

	ret = crypto_shash_update(shash, " ", 1);
	if (ret)
		goto out_free_digest;

	ret = crypto_shash_update(shash, subsysnqn, strlen(subsysnqn));
	if (ret)
		goto out_free_digest;

	ret = crypto_shash_update(shash, " NVMe-over-Fabrics", 18);
	if (ret)
		goto out_free_digest;

	ret = crypto_shash_final(shash, digest);
	if (ret)
		goto out_free_digest;

	ret = base64_encode(digest, digest_len, enc);
	if (ret < hmac_len) {
		ret = -ENOKEY;
		goto out_free_digest;
	}
	*ret_digest = enc;
	ret = 0;

out_free_digest:
	kfree_sensitive(digest);
out_free_tfm:
	crypto_free_shash(tfm);
out_free_enc:
	if (ret)
		kfree_sensitive(enc);

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_auth_generate_digest);
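
/*
 * Illustrative sketch only, not part of the upstream module: it turns a
 * generated PSK into the final TLS PSK by chaining nvme_auth_generate_digest()
 * with nvme_auth_derive_tls_psk() (declared in <linux/nvme-auth.h>, defined
 * below). The nvme_auth_example_tls_psk() name is an assumption made for
 * this example.
 */
static int __maybe_unused nvme_auth_example_tls_psk(u8 hmac_id,
		u8 *psk, size_t psk_len, char *subsysnqn, char *hostnqn)
{
	u8 *digest, *tls_psk;
	int ret;

	ret = nvme_auth_generate_digest(hmac_id, psk, psk_len,
					subsysnqn, hostnqn, &digest);
	if (ret)
		return ret;

	ret = nvme_auth_derive_tls_psk(hmac_id, psk, psk_len,
				       digest, &tls_psk);
	if (!ret) {
		/* ... insert the psk_len-byte tls_psk into the TLS keyring ... */
		kfree_sensitive(tls_psk);
	}
	kfree_sensitive(digest);
	return ret;
}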
/**
 * nvme_auth_derive_tls_psk - Derive TLS PSK
 * @hmac_id: Hash function identifier
 * @psk: Generated input PSK
 * @psk_len: Size of @psk
 * @psk_digest: TLS PSK digest
 * @ret_psk: Pointer to the resulting TLS PSK
 *
 * Derive a TLS PSK as specified in TP8018 Section 3.6.1.3:
 * TLS PSK and PSK identity Derivation
 *
 * The TLS PSK shall be derived as follows from an input PSK
 * (i.e., either a retained PSK or a generated PSK) and a PSK
 * identity using the HKDF-Extract and HKDF-Expand-Label operations
 * (refer to RFC 5869 and RFC 8446) where the hash function is the
 * one specified by the hash specifier of the PSK identity:
 * 1. PRK = HKDF-Extract(0, Input PSK); and
 * 2. TLS PSK = HKDF-Expand-Label(PRK, "nvme-tls-psk", PskIdentityContext, L),
 * where PskIdentityContext is the hash identifier indicated in
 * the PSK identity concatenated to a space character and to the
 * Base64 PSK digest (i.e., "<hash> <PSK digest>") and L is the
 * output size in bytes of the hash function (i.e., 32 for SHA-256
 * and 48 for SHA-384).
 *
 * Returns 0 on success with a valid psk pointer in @ret_psk or a negative
 * error number otherwise.
 */
int nvme_auth_derive_tls_psk(int hmac_id, u8 *psk, size_t psk_len,
		u8 *psk_digest, u8 **ret_psk)
{
	struct crypto_shash *hmac_tfm;
	const char *hmac_name;
	const char *psk_prefix = "tls13 nvme-tls-psk";
	static const char default_salt[HKDF_MAX_HASHLEN];
	size_t info_len, prk_len;
	char *info;
	unsigned char *prk, *tls_key;
	int ret;

	hmac_name = nvme_auth_hmac_name(hmac_id);
	if (!hmac_name) {
		pr_warn("%s: invalid hash algorithm %d\n",
			__func__, hmac_id);
		return -EINVAL;
	}
	if (hmac_id == NVME_AUTH_HASH_SHA512) {
		pr_warn("%s: unsupported hash algorithm %s\n",
			__func__, hmac_name);
		return -EINVAL;
	}

	hmac_tfm = crypto_alloc_shash(hmac_name, 0, 0);
	if (IS_ERR(hmac_tfm))
		return PTR_ERR(hmac_tfm);

	prk_len = crypto_shash_digestsize(hmac_tfm);
	prk = kzalloc(prk_len, GFP_KERNEL);
	if (!prk) {
		ret = -ENOMEM;
		goto out_free_shash;
	}

	if (WARN_ON(prk_len > HKDF_MAX_HASHLEN)) {
		ret = -EINVAL;
		goto out_free_prk;
	}
	ret = hkdf_extract(hmac_tfm, psk, psk_len,
			   default_salt, prk_len, prk);
	if (ret)
		goto out_free_prk;

	ret = crypto_shash_setkey(hmac_tfm, prk, prk_len);
	if (ret)
		goto out_free_prk;

	/*
	 * 2 additional bytes for the length field from HKDF-Expand-Label,
	 * 2 additional bytes for the HMAC ID, and one byte for the space
	 * separator.
	 */
	info_len = strlen(psk_digest) + strlen(psk_prefix) + 5;
	info = kzalloc(info_len + 1, GFP_KERNEL);
	if (!info) {
		ret = -ENOMEM;
		goto out_free_prk;
	}

	put_unaligned_be16(psk_len, info);
	memcpy(info + 2, psk_prefix, strlen(psk_prefix));
	sprintf(info + 2 + strlen(psk_prefix), "%02d %s", hmac_id, psk_digest);

	tls_key = kzalloc(psk_len, GFP_KERNEL);
	if (!tls_key) {
		ret = -ENOMEM;
		goto out_free_info;
	}
	ret = hkdf_expand(hmac_tfm, info, info_len, tls_key, psk_len);
	if (ret) {
		kfree(tls_key);
		goto out_free_info;
	}
	*ret_psk = tls_key;

out_free_info:
	kfree(info);
out_free_prk:
	kfree(prk);
out_free_shash:
	crypto_free_shash(hmac_tfm);

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_auth_derive_tls_psk);

MODULE_DESCRIPTION("NVMe Authentication framework");
MODULE_LICENSE("GPL v2");