xref: /linux/drivers/nvme/common/auth.c (revision 71972b9ffe1efe183a87d76d094236f9ec30656e)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2020 Hannes Reinecke, SUSE Linux
4  */
5 
6 #include <linux/module.h>
7 #include <linux/crc32.h>
8 #include <linux/base64.h>
9 #include <linux/prandom.h>
10 #include <linux/scatterlist.h>
11 #include <linux/unaligned.h>
12 #include <crypto/hash.h>
13 #include <crypto/dh.h>
14 #include <crypto/hkdf.h>
15 #include <linux/nvme.h>
16 #include <linux/nvme-auth.h>
17 
18 static u32 nvme_dhchap_seqnum;
19 static DEFINE_MUTEX(nvme_dhchap_mutex);
20 
21 u32 nvme_auth_get_seqnum(void)
22 {
23 	u32 seqnum;
24 
25 	mutex_lock(&nvme_dhchap_mutex);
26 	if (!nvme_dhchap_seqnum)
27 		nvme_dhchap_seqnum = get_random_u32();
28 	else {
29 		nvme_dhchap_seqnum++;
30 		if (!nvme_dhchap_seqnum)
31 			nvme_dhchap_seqnum++;
32 	}
33 	seqnum = nvme_dhchap_seqnum;
34 	mutex_unlock(&nvme_dhchap_mutex);
35 	return seqnum;
36 }
37 EXPORT_SYMBOL_GPL(nvme_auth_get_seqnum);
38 
/*
 * Table of DH-HMAC-CHAP Diffie-Hellman groups, indexed by the
 * NVME_AUTH_DHGROUP_* protocol identifier.  Each entry maps the
 * group id to its printable name and to the kernel crypto API
 * KPP algorithm name used for the key exchange.
 */
static struct nvme_auth_dhgroup_map {
	const char name[16];	/* printable group name */
	const char kpp[16];	/* crypto API kpp algorithm name */
} dhgroup_map[] = {
	[NVME_AUTH_DHGROUP_NULL] = {
		.name = "null", .kpp = "null" },
	[NVME_AUTH_DHGROUP_2048] = {
		.name = "ffdhe2048", .kpp = "ffdhe2048(dh)" },
	[NVME_AUTH_DHGROUP_3072] = {
		.name = "ffdhe3072", .kpp = "ffdhe3072(dh)" },
	[NVME_AUTH_DHGROUP_4096] = {
		.name = "ffdhe4096", .kpp = "ffdhe4096(dh)" },
	[NVME_AUTH_DHGROUP_6144] = {
		.name = "ffdhe6144", .kpp = "ffdhe6144(dh)" },
	[NVME_AUTH_DHGROUP_8192] = {
		.name = "ffdhe8192", .kpp = "ffdhe8192(dh)" },
};
56 
57 const char *nvme_auth_dhgroup_name(u8 dhgroup_id)
58 {
59 	if (dhgroup_id >= ARRAY_SIZE(dhgroup_map))
60 		return NULL;
61 	return dhgroup_map[dhgroup_id].name;
62 }
63 EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_name);
64 
65 const char *nvme_auth_dhgroup_kpp(u8 dhgroup_id)
66 {
67 	if (dhgroup_id >= ARRAY_SIZE(dhgroup_map))
68 		return NULL;
69 	return dhgroup_map[dhgroup_id].kpp;
70 }
71 EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_kpp);
72 
73 u8 nvme_auth_dhgroup_id(const char *dhgroup_name)
74 {
75 	int i;
76 
77 	if (!dhgroup_name || !strlen(dhgroup_name))
78 		return NVME_AUTH_DHGROUP_INVALID;
79 	for (i = 0; i < ARRAY_SIZE(dhgroup_map); i++) {
80 		if (!strlen(dhgroup_map[i].name))
81 			continue;
82 		if (!strncmp(dhgroup_map[i].name, dhgroup_name,
83 			     strlen(dhgroup_map[i].name)))
84 			return i;
85 	}
86 	return NVME_AUTH_DHGROUP_INVALID;
87 }
88 EXPORT_SYMBOL_GPL(nvme_auth_dhgroup_id);
89 
/*
 * Table of DH-HMAC-CHAP hash functions, indexed by the
 * NVME_AUTH_HASH_* protocol identifier.  Each entry records the
 * digest length in bytes plus the crypto API names of the HMAC
 * and of the plain digest.
 */
static struct nvme_dhchap_hash_map {
	int len;		/* digest size in bytes */
	const char hmac[15];	/* crypto API hmac algorithm name */
	const char digest[8];	/* crypto API digest algorithm name */
} hash_map[] = {
	[NVME_AUTH_HASH_SHA256] = {
		.len = 32,
		.hmac = "hmac(sha256)",
		.digest = "sha256",
	},
	[NVME_AUTH_HASH_SHA384] = {
		.len = 48,
		.hmac = "hmac(sha384)",
		.digest = "sha384",
	},
	[NVME_AUTH_HASH_SHA512] = {
		.len = 64,
		.hmac = "hmac(sha512)",
		.digest = "sha512",
	},
};
111 
112 const char *nvme_auth_hmac_name(u8 hmac_id)
113 {
114 	if (hmac_id >= ARRAY_SIZE(hash_map))
115 		return NULL;
116 	return hash_map[hmac_id].hmac;
117 }
118 EXPORT_SYMBOL_GPL(nvme_auth_hmac_name);
119 
120 const char *nvme_auth_digest_name(u8 hmac_id)
121 {
122 	if (hmac_id >= ARRAY_SIZE(hash_map))
123 		return NULL;
124 	return hash_map[hmac_id].digest;
125 }
126 EXPORT_SYMBOL_GPL(nvme_auth_digest_name);
127 
128 u8 nvme_auth_hmac_id(const char *hmac_name)
129 {
130 	int i;
131 
132 	if (!hmac_name || !strlen(hmac_name))
133 		return NVME_AUTH_HASH_INVALID;
134 
135 	for (i = 0; i < ARRAY_SIZE(hash_map); i++) {
136 		if (!strlen(hash_map[i].hmac))
137 			continue;
138 		if (!strncmp(hash_map[i].hmac, hmac_name,
139 			     strlen(hash_map[i].hmac)))
140 			return i;
141 	}
142 	return NVME_AUTH_HASH_INVALID;
143 }
144 EXPORT_SYMBOL_GPL(nvme_auth_hmac_id);
145 
146 size_t nvme_auth_hmac_hash_len(u8 hmac_id)
147 {
148 	if (hmac_id >= ARRAY_SIZE(hash_map))
149 		return 0;
150 	return hash_map[hmac_id].len;
151 }
152 EXPORT_SYMBOL_GPL(nvme_auth_hmac_hash_len);
153 
154 u32 nvme_auth_key_struct_size(u32 key_len)
155 {
156 	struct nvme_dhchap_key key;
157 
158 	return struct_size(&key, key, key_len);
159 }
160 EXPORT_SYMBOL_GPL(nvme_auth_key_struct_size);
161 
/*
 * nvme_auth_extract_key - decode a base64-encoded DH-HMAC-CHAP secret
 * @secret: NUL-terminated base64 string (the "DHHC-1:XX:" prefix has
 *	    already been stripped by the caller; a trailing ':' suffix
 *	    is tolerated and ignored)
 * @key_hash: hash id to record in the resulting key
 *
 * Decodes the secret, validates its length (32/48/64 byte key plus a
 * 4-byte CRC) and verifies the trailing CRC32 checksum.
 *
 * Returns the newly allocated key (owned by the caller, to be released
 * with nvme_auth_free_key()) or an ERR_PTR() on failure.
 */
struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
					      u8 key_hash)
{
	struct nvme_dhchap_key *key;
	unsigned char *p;
	u32 crc;
	int ret, key_len;
	size_t allocated_len = strlen(secret);

	/* Secret might be affixed with a ':' */
	p = strrchr(secret, ':');
	if (p)
		allocated_len = p - secret;
	/* base64 shrinks on decode, so the encoded length is enough */
	key = nvme_auth_alloc_key(allocated_len, 0);
	if (!key)
		return ERR_PTR(-ENOMEM);

	key_len = base64_decode(secret, allocated_len, key->key);
	if (key_len < 0) {
		pr_debug("base64 key decoding error %d\n",
			 key_len);
		ret = key_len;
		goto out_free_secret;
	}

	/* Decoded blob must be key (32/48/64 bytes) plus 4-byte CRC */
	if (key_len != 36 && key_len != 52 &&
	    key_len != 68) {
		pr_err("Invalid key len %d\n", key_len);
		ret = -EINVAL;
		goto out_free_secret;
	}

	/* The last four bytes is the CRC in little-endian format */
	key_len -= 4;
	/*
	 * The linux implementation doesn't do pre- and post-increments,
	 * so we have to do it manually.
	 */
	crc = ~crc32(~0, key->key, key_len);

	if (get_unaligned_le32(key->key + key_len) != crc) {
		pr_err("key crc mismatch (key %08x, crc %08x)\n",
		       get_unaligned_le32(key->key + key_len), crc);
		ret = -EKEYREJECTED;
		goto out_free_secret;
	}
	/* Shrink recorded length to the raw key (CRC excluded) */
	key->len = key_len;
	key->hash = key_hash;
	return key;
out_free_secret:
	nvme_auth_free_key(key);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(nvme_auth_extract_key);
216 
217 struct nvme_dhchap_key *nvme_auth_alloc_key(u32 len, u8 hash)
218 {
219 	u32 num_bytes = nvme_auth_key_struct_size(len);
220 	struct nvme_dhchap_key *key = kzalloc(num_bytes, GFP_KERNEL);
221 
222 	if (key) {
223 		key->len = len;
224 		key->hash = hash;
225 	}
226 	return key;
227 }
228 EXPORT_SYMBOL_GPL(nvme_auth_alloc_key);
229 
/*
 * Release a key allocated by nvme_auth_alloc_key(), zeroizing the
 * key material first.  kfree_sensitive() already accepts NULL, so no
 * separate guard is needed.
 */
void nvme_auth_free_key(struct nvme_dhchap_key *key)
{
	kfree_sensitive(key);
}
EXPORT_SYMBOL_GPL(nvme_auth_free_key);
237 
/*
 * nvme_auth_transform_key - derive the transformed DH-HMAC-CHAP key
 * @key: configured key (hash id 0 means "no transformation")
 * @nqn: NQN to bind the key to
 *
 * For hash id 0 the key is returned as an untransformed copy.
 * Otherwise the transformed key is HMAC(key, nqn || "NVMe-over-Fabrics")
 * using the HMAC selected by key->hash.
 *
 * Returns a newly allocated key (caller owns it, release with
 * nvme_auth_free_key()) or an ERR_PTR() on failure.
 */
struct nvme_dhchap_key *nvme_auth_transform_key(
		struct nvme_dhchap_key *key, char *nqn)
{
	const char *hmac_name;
	struct crypto_shash *key_tfm;
	struct shash_desc *shash;
	struct nvme_dhchap_key *transformed_key;
	int ret, key_len;

	if (!key) {
		pr_warn("No key specified\n");
		return ERR_PTR(-ENOKEY);
	}
	if (key->hash == 0) {
		/* No transformation: hand back a plain copy */
		key_len = nvme_auth_key_struct_size(key->len);
		transformed_key = kmemdup(key, key_len, GFP_KERNEL);
		if (!transformed_key)
			return ERR_PTR(-ENOMEM);
		return transformed_key;
	}
	hmac_name = nvme_auth_hmac_name(key->hash);
	if (!hmac_name) {
		pr_warn("Invalid key hash id %d\n", key->hash);
		return ERR_PTR(-EINVAL);
	}

	key_tfm = crypto_alloc_shash(hmac_name, 0, 0);
	if (IS_ERR(key_tfm))
		return ERR_CAST(key_tfm);

	/* Descriptor size depends on the tfm, so allocate dynamically */
	shash = kmalloc(sizeof(struct shash_desc) +
			crypto_shash_descsize(key_tfm),
			GFP_KERNEL);
	if (!shash) {
		ret = -ENOMEM;
		goto out_free_key;
	}

	/* Transformed key is exactly one digest long */
	key_len = crypto_shash_digestsize(key_tfm);
	transformed_key = nvme_auth_alloc_key(key_len, key->hash);
	if (!transformed_key) {
		ret = -ENOMEM;
		goto out_free_shash;
	}

	shash->tfm = key_tfm;
	ret = crypto_shash_setkey(key_tfm, key->key, key->len);
	if (ret < 0)
		goto out_free_transformed_key;
	ret = crypto_shash_init(shash);
	if (ret < 0)
		goto out_free_transformed_key;
	ret = crypto_shash_update(shash, nqn, strlen(nqn));
	if (ret < 0)
		goto out_free_transformed_key;
	/* Fixed 17-character suffix mandated by the NVMe spec */
	ret = crypto_shash_update(shash, "NVMe-over-Fabrics", 17);
	if (ret < 0)
		goto out_free_transformed_key;
	ret = crypto_shash_final(shash, transformed_key->key);
	if (ret < 0)
		goto out_free_transformed_key;

	kfree(shash);
	crypto_free_shash(key_tfm);

	return transformed_key;

out_free_transformed_key:
	nvme_auth_free_key(transformed_key);
out_free_shash:
	kfree(shash);
out_free_key:
	crypto_free_shash(key_tfm);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(nvme_auth_transform_key);
315 
316 static int nvme_auth_hash_skey(int hmac_id, u8 *skey, size_t skey_len, u8 *hkey)
317 {
318 	const char *digest_name;
319 	struct crypto_shash *tfm;
320 	int ret;
321 
322 	digest_name = nvme_auth_digest_name(hmac_id);
323 	if (!digest_name) {
324 		pr_debug("%s: failed to get digest for %d\n", __func__,
325 			 hmac_id);
326 		return -EINVAL;
327 	}
328 	tfm = crypto_alloc_shash(digest_name, 0, 0);
329 	if (IS_ERR(tfm))
330 		return -ENOMEM;
331 
332 	ret = crypto_shash_tfm_digest(tfm, skey, skey_len, hkey);
333 	if (ret < 0)
334 		pr_debug("%s: Failed to hash digest len %zu\n", __func__,
335 			 skey_len);
336 
337 	crypto_free_shash(tfm);
338 	return ret;
339 }
340 
341 int nvme_auth_augmented_challenge(u8 hmac_id, u8 *skey, size_t skey_len,
342 		u8 *challenge, u8 *aug, size_t hlen)
343 {
344 	struct crypto_shash *tfm;
345 	u8 *hashed_key;
346 	const char *hmac_name;
347 	int ret;
348 
349 	hashed_key = kmalloc(hlen, GFP_KERNEL);
350 	if (!hashed_key)
351 		return -ENOMEM;
352 
353 	ret = nvme_auth_hash_skey(hmac_id, skey,
354 				  skey_len, hashed_key);
355 	if (ret < 0)
356 		goto out_free_key;
357 
358 	hmac_name = nvme_auth_hmac_name(hmac_id);
359 	if (!hmac_name) {
360 		pr_warn("%s: invalid hash algorithm %d\n",
361 			__func__, hmac_id);
362 		ret = -EINVAL;
363 		goto out_free_key;
364 	}
365 
366 	tfm = crypto_alloc_shash(hmac_name, 0, 0);
367 	if (IS_ERR(tfm)) {
368 		ret = PTR_ERR(tfm);
369 		goto out_free_key;
370 	}
371 
372 	ret = crypto_shash_setkey(tfm, hashed_key, hlen);
373 	if (ret)
374 		goto out_free_hash;
375 
376 	ret = crypto_shash_tfm_digest(tfm, challenge, hlen, aug);
377 out_free_hash:
378 	crypto_free_shash(tfm);
379 out_free_key:
380 	kfree_sensitive(hashed_key);
381 	return ret;
382 }
383 EXPORT_SYMBOL_GPL(nvme_auth_augmented_challenge);
384 
/*
 * Generate an ephemeral DH private key by asking the kpp transform to
 * create its own secret (NULL/0 secret triggers in-kernel generation).
 * Returns 0 on success or a negative error code.
 *
 * NOTE(review): @dh_gid is currently unused here — presumably kept for
 * API symmetry with callers that select the group; confirm before
 * removing.
 */
int nvme_auth_gen_privkey(struct crypto_kpp *dh_tfm, u8 dh_gid)
{
	int ret;

	ret = crypto_kpp_set_secret(dh_tfm, NULL, 0);
	if (ret)
		pr_debug("failed to set private key, error %d\n", ret);

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_auth_gen_privkey);
396 
397 int nvme_auth_gen_pubkey(struct crypto_kpp *dh_tfm,
398 		u8 *host_key, size_t host_key_len)
399 {
400 	struct kpp_request *req;
401 	struct crypto_wait wait;
402 	struct scatterlist dst;
403 	int ret;
404 
405 	req = kpp_request_alloc(dh_tfm, GFP_KERNEL);
406 	if (!req)
407 		return -ENOMEM;
408 
409 	crypto_init_wait(&wait);
410 	kpp_request_set_input(req, NULL, 0);
411 	sg_init_one(&dst, host_key, host_key_len);
412 	kpp_request_set_output(req, &dst, host_key_len);
413 	kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
414 				 crypto_req_done, &wait);
415 
416 	ret = crypto_wait_req(crypto_kpp_generate_public_key(req), &wait);
417 	kpp_request_free(req);
418 	return ret;
419 }
420 EXPORT_SYMBOL_GPL(nvme_auth_gen_pubkey);
421 
422 int nvme_auth_gen_shared_secret(struct crypto_kpp *dh_tfm,
423 		u8 *ctrl_key, size_t ctrl_key_len,
424 		u8 *sess_key, size_t sess_key_len)
425 {
426 	struct kpp_request *req;
427 	struct crypto_wait wait;
428 	struct scatterlist src, dst;
429 	int ret;
430 
431 	req = kpp_request_alloc(dh_tfm, GFP_KERNEL);
432 	if (!req)
433 		return -ENOMEM;
434 
435 	crypto_init_wait(&wait);
436 	sg_init_one(&src, ctrl_key, ctrl_key_len);
437 	kpp_request_set_input(req, &src, ctrl_key_len);
438 	sg_init_one(&dst, sess_key, sess_key_len);
439 	kpp_request_set_output(req, &dst, sess_key_len);
440 	kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
441 				 crypto_req_done, &wait);
442 
443 	ret = crypto_wait_req(crypto_kpp_compute_shared_secret(req), &wait);
444 
445 	kpp_request_free(req);
446 	return ret;
447 }
448 EXPORT_SYMBOL_GPL(nvme_auth_gen_shared_secret);
449 
450 int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key)
451 {
452 	struct nvme_dhchap_key *key;
453 	u8 key_hash;
454 
455 	if (!secret) {
456 		*ret_key = NULL;
457 		return 0;
458 	}
459 
460 	if (sscanf(secret, "DHHC-1:%hhd:%*s:", &key_hash) != 1)
461 		return -EINVAL;
462 
463 	/* Pass in the secret without the 'DHHC-1:XX:' prefix */
464 	key = nvme_auth_extract_key(secret + 10, key_hash);
465 	if (IS_ERR(key)) {
466 		*ret_key = NULL;
467 		return PTR_ERR(key);
468 	}
469 
470 	*ret_key = key;
471 	return 0;
472 }
473 EXPORT_SYMBOL_GPL(nvme_auth_generate_key);
474 
475 /**
476  * nvme_auth_generate_psk - Generate a PSK for TLS
477  * @hmac_id: Hash function identifier
478  * @skey: Session key
479  * @skey_len: Length of @skey
480  * @c1: Value of challenge C1
481  * @c2: Value of challenge C2
482  * @hash_len: Hash length of the hash algorithm
 * @ret_psk: Pointer to the resulting generated PSK
484  * @ret_len: length of @ret_psk
485  *
486  * Generate a PSK for TLS as specified in NVMe base specification, section
487  * 8.13.5.9: Generated PSK for TLS
488  *
489  * The generated PSK for TLS shall be computed applying the HMAC function
490  * using the hash function H( ) selected by the HashID parameter in the
491  * DH-HMAC-CHAP_Challenge message with the session key KS as key to the
492  * concatenation of the two challenges C1 and C2 (i.e., generated
493  * PSK = HMAC(KS, C1 || C2)).
494  *
495  * Returns 0 on success with a valid generated PSK pointer in @ret_psk and
496  * the length of @ret_psk in @ret_len, or a negative error number otherwise.
497  */
498 int nvme_auth_generate_psk(u8 hmac_id, u8 *skey, size_t skey_len,
499 		u8 *c1, u8 *c2, size_t hash_len, u8 **ret_psk, size_t *ret_len)
500 {
501 	struct crypto_shash *tfm;
502 	SHASH_DESC_ON_STACK(shash, tfm);
503 	u8 *psk;
504 	const char *hmac_name;
505 	int ret, psk_len;
506 
507 	if (!c1 || !c2)
508 		return -EINVAL;
509 
510 	hmac_name = nvme_auth_hmac_name(hmac_id);
511 	if (!hmac_name) {
512 		pr_warn("%s: invalid hash algorithm %d\n",
513 			__func__, hmac_id);
514 		return -EINVAL;
515 	}
516 
517 	tfm = crypto_alloc_shash(hmac_name, 0, 0);
518 	if (IS_ERR(tfm))
519 		return PTR_ERR(tfm);
520 
521 	psk_len = crypto_shash_digestsize(tfm);
522 	psk = kzalloc(psk_len, GFP_KERNEL);
523 	if (!psk) {
524 		ret = -ENOMEM;
525 		goto out_free_tfm;
526 	}
527 
528 	shash->tfm = tfm;
529 	ret = crypto_shash_setkey(tfm, skey, skey_len);
530 	if (ret)
531 		goto out_free_psk;
532 
533 	ret = crypto_shash_init(shash);
534 	if (ret)
535 		goto out_free_psk;
536 
537 	ret = crypto_shash_update(shash, c1, hash_len);
538 	if (ret)
539 		goto out_free_psk;
540 
541 	ret = crypto_shash_update(shash, c2, hash_len);
542 	if (ret)
543 		goto out_free_psk;
544 
545 	ret = crypto_shash_final(shash, psk);
546 	if (!ret) {
547 		*ret_psk = psk;
548 		*ret_len = psk_len;
549 	}
550 
551 out_free_psk:
552 	if (ret)
553 		kfree_sensitive(psk);
554 out_free_tfm:
555 	crypto_free_shash(tfm);
556 
557 	return ret;
558 }
559 EXPORT_SYMBOL_GPL(nvme_auth_generate_psk);
560 
561 /**
562  * nvme_auth_generate_digest - Generate TLS PSK digest
563  * @hmac_id: Hash function identifier
564  * @psk: Generated input PSK
565  * @psk_len: Length of @psk
566  * @subsysnqn: NQN of the subsystem
567  * @hostnqn: NQN of the host
568  * @ret_digest: Pointer to the returned digest
569  *
570  * Generate a TLS PSK digest as specified in TP8018 Section 3.6.1.3:
571  *   TLS PSK and PSK identity Derivation
572  *
573  * The PSK digest shall be computed by encoding in Base64 (refer to RFC
574  * 4648) the result of the application of the HMAC function using the hash
 * function specified in item 4 above (i.e., the hash function of the cipher
576  * suite associated with the PSK identity) with the PSK as HMAC key to the
577  * concatenation of:
578  * - the NQN of the host (i.e., NQNh) not including the null terminator;
579  * - a space character;
580  * - the NQN of the NVM subsystem (i.e., NQNc) not including the null
581  *   terminator;
582  * - a space character; and
583  * - the seventeen ASCII characters "NVMe-over-Fabrics"
584  * (i.e., <PSK digest> = Base64(HMAC(PSK, NQNh || " " || NQNc || " " ||
585  *  "NVMe-over-Fabrics"))).
586  * The length of the PSK digest depends on the hash function used to compute
587  * it as follows:
588  * - If the SHA-256 hash function is used, the resulting PSK digest is 44
589  *   characters long; or
590  * - If the SHA-384 hash function is used, the resulting PSK digest is 64
591  *   characters long.
592  *
593  * Returns 0 on success with a valid digest pointer in @ret_digest, or a
594  * negative error number on failure.
595  */
596 int nvme_auth_generate_digest(u8 hmac_id, u8 *psk, size_t psk_len,
597 		char *subsysnqn, char *hostnqn, u8 **ret_digest)
598 {
599 	struct crypto_shash *tfm;
600 	SHASH_DESC_ON_STACK(shash, tfm);
601 	u8 *digest, *enc;
602 	const char *hmac_name;
603 	size_t digest_len, hmac_len;
604 	int ret;
605 
606 	if (WARN_ON(!subsysnqn || !hostnqn))
607 		return -EINVAL;
608 
609 	hmac_name = nvme_auth_hmac_name(hmac_id);
610 	if (!hmac_name) {
611 		pr_warn("%s: invalid hash algorithm %d\n",
612 			__func__, hmac_id);
613 		return -EINVAL;
614 	}
615 
616 	switch (nvme_auth_hmac_hash_len(hmac_id)) {
617 	case 32:
618 		hmac_len = 44;
619 		break;
620 	case 48:
621 		hmac_len = 64;
622 		break;
623 	default:
624 		pr_warn("%s: invalid hash algorithm '%s'\n",
625 			__func__, hmac_name);
626 		return -EINVAL;
627 	}
628 
629 	enc = kzalloc(hmac_len + 1, GFP_KERNEL);
630 	if (!enc)
631 		return -ENOMEM;
632 
633 	tfm = crypto_alloc_shash(hmac_name, 0, 0);
634 	if (IS_ERR(tfm)) {
635 		ret = PTR_ERR(tfm);
636 		goto out_free_enc;
637 	}
638 
639 	digest_len = crypto_shash_digestsize(tfm);
640 	digest = kzalloc(digest_len, GFP_KERNEL);
641 	if (!digest) {
642 		ret = -ENOMEM;
643 		goto out_free_tfm;
644 	}
645 
646 	shash->tfm = tfm;
647 	ret = crypto_shash_setkey(tfm, psk, psk_len);
648 	if (ret)
649 		goto out_free_digest;
650 
651 	ret = crypto_shash_init(shash);
652 	if (ret)
653 		goto out_free_digest;
654 
655 	ret = crypto_shash_update(shash, hostnqn, strlen(hostnqn));
656 	if (ret)
657 		goto out_free_digest;
658 
659 	ret = crypto_shash_update(shash, " ", 1);
660 	if (ret)
661 		goto out_free_digest;
662 
663 	ret = crypto_shash_update(shash, subsysnqn, strlen(subsysnqn));
664 	if (ret)
665 		goto out_free_digest;
666 
667 	ret = crypto_shash_update(shash, " NVMe-over-Fabrics", 18);
668 	if (ret)
669 		goto out_free_digest;
670 
671 	ret = crypto_shash_final(shash, digest);
672 	if (ret)
673 		goto out_free_digest;
674 
675 	ret = base64_encode(digest, digest_len, enc);
676 	if (ret < hmac_len) {
677 		ret = -ENOKEY;
678 		goto out_free_digest;
679 	}
680 	*ret_digest = enc;
681 	ret = 0;
682 
683 out_free_digest:
684 	kfree_sensitive(digest);
685 out_free_tfm:
686 	crypto_free_shash(tfm);
687 out_free_enc:
688 	if (ret)
689 		kfree_sensitive(enc);
690 
691 	return ret;
692 }
693 EXPORT_SYMBOL_GPL(nvme_auth_generate_digest);
694 
695 MODULE_DESCRIPTION("NVMe Authentication framework");
696 MODULE_LICENSE("GPL v2");
697