1 /* 2 * CDDL HEADER START 3 * 4 * This file and its contents are supplied under the terms of the 5 * Common Development and Distribution License ("CDDL"), version 1.0. 6 * You may only use this file in accordance with the terms of version 7 * 1.0 of the CDDL. 8 * 9 * A full copy of the text of the CDDL should have accompanied this 10 * source. A copy of the CDDL is also available via the Internet at 11 * http://www.illumos.org/license/CDDL. 12 * 13 * CDDL HEADER END 14 */ 15 16 /* 17 * Copyright (c) 2017, Datto, Inc. All rights reserved. 18 */ 19 20 #include <sys/zio_crypt.h> 21 #include <sys/dmu.h> 22 #include <sys/dmu_objset.h> 23 #include <sys/dnode.h> 24 #include <sys/fs/zfs.h> 25 #include <sys/zio.h> 26 #include <sys/zil.h> 27 #include <sys/sha2.h> 28 #include <sys/hkdf.h> 29 #include <sys/qat.h> 30 31 /* 32 * This file is responsible for handling all of the details of generating 33 * encryption parameters and performing encryption and authentication. 34 * 35 * BLOCK ENCRYPTION PARAMETERS: 36 * Encryption /Authentication Algorithm Suite (crypt): 37 * The encryption algorithm, mode, and key length we are going to use. We 38 * currently support AES in either GCM or CCM modes with 128, 192, and 256 bit 39 * keys. All authentication is currently done with SHA512-HMAC. 40 * 41 * Plaintext: 42 * The unencrypted data that we want to encrypt. 43 * 44 * Initialization Vector (IV): 45 * An initialization vector for the encryption algorithms. This is used to 46 * "tweak" the encryption algorithms so that two blocks of the same data are 47 * encrypted into different ciphertext outputs, thus obfuscating block patterns. 48 * The supported encryption modes (AES-GCM and AES-CCM) require that an IV is 49 * never reused with the same encryption key. This value is stored unencrypted 50 * and must simply be provided to the decryption function. We use a 96 bit IV 51 * (as recommended by NIST) for all block encryption. For non-dedup blocks we 52 * derive the IV randomly. The first 64 bits of the IV are stored in the second 53 * word of DVA[2] and the remaining 32 bits are stored in the upper 32 bits of 54 * blk_fill. This is safe because encrypted blocks can't use the upper 32 bits 55 * of blk_fill. We only encrypt level 0 blocks, which normally have a fill count 56 * of 1. The only exception is for DMU_OT_DNODE objects, where the fill count of 57 * level 0 blocks is the number of allocated dnodes in that block. The on-disk 58 * format supports at most 2^15 slots per L0 dnode block, because the maximum 59 * block size is 16MB (2^24). In either case, for level 0 blocks this number 60 * will still be smaller than UINT32_MAX so it is safe to store the IV in the 61 * top 32 bits of blk_fill, while leaving the bottom 32 bits of the fill count 62 * for the dnode code. 63 * 64 * Master key: 65 * This is the most important secret data of an encrypted dataset. It is used 66 * along with the salt to generate that actual encryption keys via HKDF. We 67 * do not use the master key to directly encrypt any data because there are 68 * theoretical limits on how much data can actually be safely encrypted with 69 * any encryption mode. The master key is stored encrypted on disk with the 70 * user's wrapping key. Its length is determined by the encryption algorithm. 71 * For details on how this is stored see the block comment in dsl_crypt.c 72 * 73 * Salt: 74 * Used as an input to the HKDF function, along with the master key. We use a 75 * 64 bit salt, stored unencrypted in the first word of DVA[2]. Any given salt 76 * can be used for encrypting many blocks, so we cache the current salt and the 77 * associated derived key in zio_crypt_t so we do not need to derive it again 78 * needlessly. 79 * 80 * Encryption Key: 81 * A secret binary key, generated from an HKDF function used to encrypt and 82 * decrypt data. 83 * 84 * Message Authentication Code (MAC) 85 * The MAC is an output of authenticated encryption modes such as AES-GCM and 86 * AES-CCM. Its purpose is to ensure that an attacker cannot modify encrypted 87 * data on disk and return garbage to the application. Effectively, it is a 88 * checksum that can not be reproduced by an attacker. We store the MAC in the 89 * second 128 bits of blk_cksum, leaving the first 128 bits for a truncated 90 * regular checksum of the ciphertext which can be used for scrubbing. 91 * 92 * OBJECT AUTHENTICATION: 93 * Some object types, such as DMU_OT_MASTER_NODE cannot be encrypted because 94 * they contain some info that always needs to be readable. To prevent this 95 * data from being altered, we authenticate this data using SHA512-HMAC. This 96 * will produce a MAC (similar to the one produced via encryption) which can 97 * be used to verify the object was not modified. HMACs do not require key 98 * rotation or IVs, so we can keep up to the full 3 copies of authenticated 99 * data. 100 * 101 * ZIL ENCRYPTION: 102 * ZIL blocks have their bp written to disk ahead of the associated data, so we 103 * cannot store the MAC there as we normally do. For these blocks the MAC is 104 * stored in the embedded checksum within the zil_chain_t header. The salt and 105 * IV are generated for the block on bp allocation instead of at encryption 106 * time. In addition, ZIL blocks have some pieces that must be left in plaintext 107 * for claiming even though all of the sensitive user data still needs to be 108 * encrypted. The function zio_crypt_init_uios_zil() handles parsing which 109 * pieces of the block need to be encrypted. All data that is not encrypted is 110 * authenticated using the AAD mechanisms that the supported encryption modes 111 * provide for. In order to preserve the semantics of the ZIL for encrypted 112 * datasets, the ZIL is not protected at the objset level as described below. 113 * 114 * DNODE ENCRYPTION: 115 * Similarly to ZIL blocks, the core part of each dnode_phys_t needs to be left 116 * in plaintext for scrubbing and claiming, but the bonus buffers might contain 117 * sensitive user data. The function zio_crypt_init_uios_dnode() handles parsing 118 * which pieces of the block need to be encrypted. For more details about 119 * dnode authentication and encryption, see zio_crypt_init_uios_dnode(). 120 * 121 * OBJECT SET AUTHENTICATION: 122 * Up to this point, everything we have encrypted and authenticated has been 123 * at level 0 (or -2 for the ZIL). If we did not do any further work the 124 * on-disk format would be susceptible to attacks that deleted or rearranged 125 * the order of level 0 blocks. Ideally, the cleanest solution would be to 126 * maintain a tree of authentication MACs going up the bp tree. However, this 127 * presents a problem for raw sends. Send files do not send information about 128 * indirect blocks so there would be no convenient way to transfer the MACs and 129 * they cannot be recalculated on the receive side without the master key which 130 * would defeat one of the purposes of raw sends in the first place. Instead, 131 * for the indirect levels of the bp tree, we use a regular SHA512 of the MACs 132 * from the level below. We also include some portable fields from blk_prop such 133 * as the lsize and compression algorithm to prevent the data from being 134 * misinterpreted. 135 * 136 * At the objset level, we maintain 2 separate 256 bit MACs in the 137 * objset_phys_t. The first one is "portable" and is the logical root of the 138 * MAC tree maintained in the metadnode's bps. The second, is "local" and is 139 * used as the root MAC for the user accounting objects, which are also not 140 * transferred via "zfs send". The portable MAC is sent in the DRR_BEGIN payload 141 * of the send file. The useraccounting code ensures that the useraccounting 142 * info is not present upon a receive, so the local MAC can simply be cleared 143 * out at that time. For more info about objset_phys_t authentication, see 144 * zio_crypt_do_objset_hmacs(). 145 * 146 * CONSIDERATIONS FOR DEDUP: 147 * In order for dedup to work, blocks that we want to dedup with one another 148 * need to use the same IV and encryption key, so that they will have the same 149 * ciphertext. Normally, one should never reuse an IV with the same encryption 150 * key or else AES-GCM and AES-CCM can both actually leak the plaintext of both 151 * blocks. In this case, however, since we are using the same plaintext as 152 * well all that we end up with is a duplicate of the original ciphertext we 153 * already had. As a result, an attacker with read access to the raw disk will 154 * be able to tell which blocks are the same but this information is given away 155 * by dedup anyway. In order to get the same IVs and encryption keys for 156 * equivalent blocks of data we use an HMAC of the plaintext. We use an HMAC 157 * here so that a reproducible checksum of the plaintext is never available to 158 * the attacker. The HMAC key is kept alongside the master key, encrypted on 159 * disk. The first 64 bits of the HMAC are used in place of the random salt, and 160 * the next 96 bits are used as the IV. As a result of this mechanism, dedup 161 * will only work within a clone family since encrypted dedup requires use of 162 * the same master and HMAC keys. 163 */ 164 165 /* 166 * After encrypting many blocks with the same key we may start to run up 167 * against the theoretical limits of how much data can securely be encrypted 168 * with a single key using the supported encryption modes. The most obvious 169 * limitation is that our risk of generating 2 equivalent 96 bit IVs increases 170 * the more IVs we generate (which both GCM and CCM modes strictly forbid). 171 * This risk actually grows surprisingly quickly over time according to the 172 * Birthday Problem. With a total IV space of 2^(96 bits), and assuming we have 173 * generated n IVs with a cryptographically secure RNG, the approximate 174 * probability p(n) of a collision is given as: 175 * 176 * p(n) ~= e^(-n*(n-1)/(2*(2^96))) 177 * 178 * [http://www.math.cornell.edu/~mec/2008-2009/TianyiZheng/Birthday.html] 179 * 180 * Assuming that we want to ensure that p(n) never goes over 1 / 1 trillion 181 * we must not write more than 398,065,730 blocks with the same encryption key. 182 * Therefore, we rotate our keys after 400,000,000 blocks have been written by 183 * generating a new random 64 bit salt for our HKDF encryption key generation 184 * function. 185 */ 186 #define ZFS_KEY_MAX_SALT_USES_DEFAULT 400000000 187 #define ZFS_CURRENT_MAX_SALT_USES \ 188 (MIN(zfs_key_max_salt_uses, ZFS_KEY_MAX_SALT_USES_DEFAULT)) 189 static unsigned long zfs_key_max_salt_uses = ZFS_KEY_MAX_SALT_USES_DEFAULT; 190 191 typedef struct blkptr_auth_buf { 192 uint64_t bab_prop; /* blk_prop - portable mask */ 193 uint8_t bab_mac[ZIO_DATA_MAC_LEN]; /* MAC from blk_cksum */ 194 uint64_t bab_pad; /* reserved for future use */ 195 } blkptr_auth_buf_t; 196 197 const zio_crypt_info_t zio_crypt_table[ZIO_CRYPT_FUNCTIONS] = { 198 {"", ZC_TYPE_NONE, 0, "inherit"}, 199 {"", ZC_TYPE_NONE, 0, "on"}, 200 {"", ZC_TYPE_NONE, 0, "off"}, 201 {SUN_CKM_AES_CCM, ZC_TYPE_CCM, 16, "aes-128-ccm"}, 202 {SUN_CKM_AES_CCM, ZC_TYPE_CCM, 24, "aes-192-ccm"}, 203 {SUN_CKM_AES_CCM, ZC_TYPE_CCM, 32, "aes-256-ccm"}, 204 {SUN_CKM_AES_GCM, ZC_TYPE_GCM, 16, "aes-128-gcm"}, 205 {SUN_CKM_AES_GCM, ZC_TYPE_GCM, 24, "aes-192-gcm"}, 206 {SUN_CKM_AES_GCM, ZC_TYPE_GCM, 32, "aes-256-gcm"} 207 }; 208 209 void 210 zio_crypt_key_destroy(zio_crypt_key_t *key) 211 { 212 rw_destroy(&key->zk_salt_lock); 213 214 /* free crypto templates */ 215 crypto_destroy_ctx_template(key->zk_current_tmpl); 216 crypto_destroy_ctx_template(key->zk_hmac_tmpl); 217 218 /* zero out sensitive data */ 219 memset(key, 0, sizeof (zio_crypt_key_t)); 220 } 221 222 int 223 zio_crypt_key_init(uint64_t crypt, zio_crypt_key_t *key) 224 { 225 int ret; 226 crypto_mechanism_t mech = {0}; 227 uint_t keydata_len; 228 229 ASSERT(key != NULL); 230 ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS); 231 232 /* 233 * Workaround for GCC 12+ with UBSan enabled deficencies. 234 * 235 * GCC 12+ invoked with -fsanitize=undefined incorrectly reports the code 236 * below as violating -Warray-bounds 237 */ 238 #if defined(__GNUC__) && !defined(__clang__) && \ 239 ((!defined(_KERNEL) && defined(ZFS_UBSAN_ENABLED)) || \ 240 defined(CONFIG_UBSAN)) 241 #pragma GCC diagnostic push 242 #pragma GCC diagnostic ignored "-Warray-bounds" 243 #endif 244 keydata_len = zio_crypt_table[crypt].ci_keylen; 245 #if defined(__GNUC__) && !defined(__clang__) && \ 246 ((!defined(_KERNEL) && defined(ZFS_UBSAN_ENABLED)) || \ 247 defined(CONFIG_UBSAN)) 248 #pragma GCC diagnostic pop 249 #endif 250 memset(key, 0, sizeof (zio_crypt_key_t)); 251 rw_init(&key->zk_salt_lock, NULL, RW_DEFAULT, NULL); 252 253 /* fill keydata buffers and salt with random data */ 254 ret = random_get_bytes((uint8_t *)&key->zk_guid, sizeof (uint64_t)); 255 if (ret != 0) 256 goto error; 257 258 ret = random_get_bytes(key->zk_master_keydata, keydata_len); 259 if (ret != 0) 260 goto error; 261 262 ret = random_get_bytes(key->zk_hmac_keydata, SHA512_HMAC_KEYLEN); 263 if (ret != 0) 264 goto error; 265 266 ret = random_get_bytes(key->zk_salt, ZIO_DATA_SALT_LEN); 267 if (ret != 0) 268 goto error; 269 270 /* derive the current key from the master key */ 271 ret = hkdf_sha512(key->zk_master_keydata, keydata_len, NULL, 0, 272 key->zk_salt, ZIO_DATA_SALT_LEN, key->zk_current_keydata, 273 keydata_len); 274 if (ret != 0) 275 goto error; 276 277 /* initialize keys for the ICP */ 278 key->zk_current_key.ck_data = key->zk_current_keydata; 279 key->zk_current_key.ck_length = CRYPTO_BYTES2BITS(keydata_len); 280 281 key->zk_hmac_key.ck_data = &key->zk_hmac_key; 282 key->zk_hmac_key.ck_length = CRYPTO_BYTES2BITS(SHA512_HMAC_KEYLEN); 283 284 /* 285 * Initialize the crypto templates. It's ok if this fails because 286 * this is just an optimization. 287 */ 288 mech.cm_type = crypto_mech2id(zio_crypt_table[crypt].ci_mechname); 289 ret = crypto_create_ctx_template(&mech, &key->zk_current_key, 290 &key->zk_current_tmpl); 291 if (ret != CRYPTO_SUCCESS) 292 key->zk_current_tmpl = NULL; 293 294 mech.cm_type = crypto_mech2id(SUN_CKM_SHA512_HMAC); 295 ret = crypto_create_ctx_template(&mech, &key->zk_hmac_key, 296 &key->zk_hmac_tmpl); 297 if (ret != CRYPTO_SUCCESS) 298 key->zk_hmac_tmpl = NULL; 299 300 key->zk_crypt = crypt; 301 key->zk_version = ZIO_CRYPT_KEY_CURRENT_VERSION; 302 key->zk_salt_count = 0; 303 304 return (0); 305 306 error: 307 zio_crypt_key_destroy(key); 308 return (ret); 309 } 310 311 static int 312 zio_crypt_key_change_salt(zio_crypt_key_t *key) 313 { 314 int ret = 0; 315 uint8_t salt[ZIO_DATA_SALT_LEN]; 316 crypto_mechanism_t mech; 317 uint_t keydata_len = zio_crypt_table[key->zk_crypt].ci_keylen; 318 319 /* generate a new salt */ 320 ret = random_get_bytes(salt, ZIO_DATA_SALT_LEN); 321 if (ret != 0) 322 goto error; 323 324 rw_enter(&key->zk_salt_lock, RW_WRITER); 325 326 /* someone beat us to the salt rotation, just unlock and return */ 327 if (key->zk_salt_count < ZFS_CURRENT_MAX_SALT_USES) 328 goto out_unlock; 329 330 /* derive the current key from the master key and the new salt */ 331 ret = hkdf_sha512(key->zk_master_keydata, keydata_len, NULL, 0, 332 salt, ZIO_DATA_SALT_LEN, key->zk_current_keydata, keydata_len); 333 if (ret != 0) 334 goto out_unlock; 335 336 /* assign the salt and reset the usage count */ 337 memcpy(key->zk_salt, salt, ZIO_DATA_SALT_LEN); 338 key->zk_salt_count = 0; 339 340 /* destroy the old context template and create the new one */ 341 crypto_destroy_ctx_template(key->zk_current_tmpl); 342 ret = crypto_create_ctx_template(&mech, &key->zk_current_key, 343 &key->zk_current_tmpl); 344 if (ret != CRYPTO_SUCCESS) 345 key->zk_current_tmpl = NULL; 346 347 rw_exit(&key->zk_salt_lock); 348 349 return (0); 350 351 out_unlock: 352 rw_exit(&key->zk_salt_lock); 353 error: 354 return (ret); 355 } 356 357 /* See comment above zfs_key_max_salt_uses definition for details */ 358 int 359 zio_crypt_key_get_salt(zio_crypt_key_t *key, uint8_t *salt) 360 { 361 int ret; 362 boolean_t salt_change; 363 364 rw_enter(&key->zk_salt_lock, RW_READER); 365 366 memcpy(salt, key->zk_salt, ZIO_DATA_SALT_LEN); 367 salt_change = (atomic_inc_64_nv(&key->zk_salt_count) >= 368 ZFS_CURRENT_MAX_SALT_USES); 369 370 rw_exit(&key->zk_salt_lock); 371 372 if (salt_change) { 373 ret = zio_crypt_key_change_salt(key); 374 if (ret != 0) 375 goto error; 376 } 377 378 return (0); 379 380 error: 381 return (ret); 382 } 383 384 /* 385 * This function handles all encryption and decryption in zfs. When 386 * encrypting it expects puio to reference the plaintext and cuio to 387 * reference the ciphertext. cuio must have enough space for the 388 * ciphertext + room for a MAC. datalen should be the length of the 389 * plaintext / ciphertext alone. 390 */ 391 static int 392 zio_do_crypt_uio(boolean_t encrypt, uint64_t crypt, crypto_key_t *key, 393 crypto_ctx_template_t tmpl, uint8_t *ivbuf, uint_t datalen, 394 zfs_uio_t *puio, zfs_uio_t *cuio, uint8_t *authbuf, uint_t auth_len) 395 { 396 int ret; 397 crypto_data_t plaindata, cipherdata; 398 CK_AES_CCM_PARAMS ccmp; 399 CK_AES_GCM_PARAMS gcmp; 400 crypto_mechanism_t mech; 401 zio_crypt_info_t crypt_info; 402 uint_t plain_full_len, maclen; 403 404 ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS); 405 406 /* lookup the encryption info */ 407 crypt_info = zio_crypt_table[crypt]; 408 409 /* the mac will always be the last iovec_t in the cipher uio */ 410 maclen = cuio->uio_iov[cuio->uio_iovcnt - 1].iov_len; 411 412 ASSERT(maclen <= ZIO_DATA_MAC_LEN); 413 414 /* setup encryption mechanism (same as crypt) */ 415 mech.cm_type = crypto_mech2id(crypt_info.ci_mechname); 416 417 /* 418 * Strangely, the ICP requires that plain_full_len must include 419 * the MAC length when decrypting, even though the UIO does not 420 * need to have the extra space allocated. 421 */ 422 if (encrypt) { 423 plain_full_len = datalen; 424 } else { 425 plain_full_len = datalen + maclen; 426 } 427 428 /* 429 * setup encryption params (currently only AES CCM and AES GCM 430 * are supported) 431 */ 432 if (crypt_info.ci_crypt_type == ZC_TYPE_CCM) { 433 ccmp.ulNonceSize = ZIO_DATA_IV_LEN; 434 ccmp.ulAuthDataSize = auth_len; 435 ccmp.authData = authbuf; 436 ccmp.ulMACSize = maclen; 437 ccmp.nonce = ivbuf; 438 ccmp.ulDataSize = plain_full_len; 439 440 mech.cm_param = (char *)(&ccmp); 441 mech.cm_param_len = sizeof (CK_AES_CCM_PARAMS); 442 } else { 443 gcmp.ulIvLen = ZIO_DATA_IV_LEN; 444 gcmp.ulIvBits = CRYPTO_BYTES2BITS(ZIO_DATA_IV_LEN); 445 gcmp.ulAADLen = auth_len; 446 gcmp.pAAD = authbuf; 447 gcmp.ulTagBits = CRYPTO_BYTES2BITS(maclen); 448 gcmp.pIv = ivbuf; 449 450 mech.cm_param = (char *)(&gcmp); 451 mech.cm_param_len = sizeof (CK_AES_GCM_PARAMS); 452 } 453 454 /* populate the cipher and plain data structs. */ 455 plaindata.cd_format = CRYPTO_DATA_UIO; 456 plaindata.cd_offset = 0; 457 plaindata.cd_uio = puio; 458 plaindata.cd_length = plain_full_len; 459 460 cipherdata.cd_format = CRYPTO_DATA_UIO; 461 cipherdata.cd_offset = 0; 462 cipherdata.cd_uio = cuio; 463 cipherdata.cd_length = datalen + maclen; 464 465 /* perform the actual encryption */ 466 if (encrypt) { 467 ret = crypto_encrypt(&mech, &plaindata, key, tmpl, &cipherdata); 468 if (ret != CRYPTO_SUCCESS) { 469 ret = SET_ERROR(EIO); 470 goto error; 471 } 472 } else { 473 ret = crypto_decrypt(&mech, &cipherdata, key, tmpl, &plaindata); 474 if (ret != CRYPTO_SUCCESS) { 475 ASSERT3U(ret, ==, CRYPTO_INVALID_MAC); 476 ret = SET_ERROR(ECKSUM); 477 goto error; 478 } 479 } 480 481 return (0); 482 483 error: 484 return (ret); 485 } 486 487 int 488 zio_crypt_key_wrap(crypto_key_t *cwkey, zio_crypt_key_t *key, uint8_t *iv, 489 uint8_t *mac, uint8_t *keydata_out, uint8_t *hmac_keydata_out) 490 { 491 int ret; 492 zfs_uio_t puio, cuio; 493 uint64_t aad[3]; 494 iovec_t plain_iovecs[2], cipher_iovecs[3]; 495 uint64_t crypt = key->zk_crypt; 496 uint_t enc_len, keydata_len, aad_len; 497 498 ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS); 499 500 keydata_len = zio_crypt_table[crypt].ci_keylen; 501 502 /* generate iv for wrapping the master and hmac key */ 503 ret = random_get_pseudo_bytes(iv, WRAPPING_IV_LEN); 504 if (ret != 0) 505 goto error; 506 507 /* initialize zfs_uio_ts */ 508 plain_iovecs[0].iov_base = key->zk_master_keydata; 509 plain_iovecs[0].iov_len = keydata_len; 510 plain_iovecs[1].iov_base = key->zk_hmac_keydata; 511 plain_iovecs[1].iov_len = SHA512_HMAC_KEYLEN; 512 513 cipher_iovecs[0].iov_base = keydata_out; 514 cipher_iovecs[0].iov_len = keydata_len; 515 cipher_iovecs[1].iov_base = hmac_keydata_out; 516 cipher_iovecs[1].iov_len = SHA512_HMAC_KEYLEN; 517 cipher_iovecs[2].iov_base = mac; 518 cipher_iovecs[2].iov_len = WRAPPING_MAC_LEN; 519 520 /* 521 * Although we don't support writing to the old format, we do 522 * support rewrapping the key so that the user can move and 523 * quarantine datasets on the old format. 524 */ 525 if (key->zk_version == 0) { 526 aad_len = sizeof (uint64_t); 527 aad[0] = LE_64(key->zk_guid); 528 } else { 529 ASSERT3U(key->zk_version, ==, ZIO_CRYPT_KEY_CURRENT_VERSION); 530 aad_len = sizeof (uint64_t) * 3; 531 aad[0] = LE_64(key->zk_guid); 532 aad[1] = LE_64(crypt); 533 aad[2] = LE_64(key->zk_version); 534 } 535 536 enc_len = zio_crypt_table[crypt].ci_keylen + SHA512_HMAC_KEYLEN; 537 puio.uio_iov = plain_iovecs; 538 puio.uio_iovcnt = 2; 539 puio.uio_segflg = UIO_SYSSPACE; 540 cuio.uio_iov = cipher_iovecs; 541 cuio.uio_iovcnt = 3; 542 cuio.uio_segflg = UIO_SYSSPACE; 543 544 /* encrypt the keys and store the resulting ciphertext and mac */ 545 ret = zio_do_crypt_uio(B_TRUE, crypt, cwkey, NULL, iv, enc_len, 546 &puio, &cuio, (uint8_t *)aad, aad_len); 547 if (ret != 0) 548 goto error; 549 550 return (0); 551 552 error: 553 return (ret); 554 } 555 556 int 557 zio_crypt_key_unwrap(crypto_key_t *cwkey, uint64_t crypt, uint64_t version, 558 uint64_t guid, uint8_t *keydata, uint8_t *hmac_keydata, uint8_t *iv, 559 uint8_t *mac, zio_crypt_key_t *key) 560 { 561 crypto_mechanism_t mech; 562 zfs_uio_t puio, cuio; 563 uint64_t aad[3]; 564 iovec_t plain_iovecs[2], cipher_iovecs[3]; 565 uint_t enc_len, keydata_len, aad_len; 566 int ret; 567 568 ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS); 569 570 rw_init(&key->zk_salt_lock, NULL, RW_DEFAULT, NULL); 571 572 keydata_len = zio_crypt_table[crypt].ci_keylen; 573 574 /* initialize zfs_uio_ts */ 575 plain_iovecs[0].iov_base = key->zk_master_keydata; 576 plain_iovecs[0].iov_len = keydata_len; 577 plain_iovecs[1].iov_base = key->zk_hmac_keydata; 578 plain_iovecs[1].iov_len = SHA512_HMAC_KEYLEN; 579 580 cipher_iovecs[0].iov_base = keydata; 581 cipher_iovecs[0].iov_len = keydata_len; 582 cipher_iovecs[1].iov_base = hmac_keydata; 583 cipher_iovecs[1].iov_len = SHA512_HMAC_KEYLEN; 584 cipher_iovecs[2].iov_base = mac; 585 cipher_iovecs[2].iov_len = WRAPPING_MAC_LEN; 586 587 if (version == 0) { 588 aad_len = sizeof (uint64_t); 589 aad[0] = LE_64(guid); 590 } else { 591 ASSERT3U(version, ==, ZIO_CRYPT_KEY_CURRENT_VERSION); 592 aad_len = sizeof (uint64_t) * 3; 593 aad[0] = LE_64(guid); 594 aad[1] = LE_64(crypt); 595 aad[2] = LE_64(version); 596 } 597 598 enc_len = keydata_len + SHA512_HMAC_KEYLEN; 599 puio.uio_iov = plain_iovecs; 600 puio.uio_segflg = UIO_SYSSPACE; 601 puio.uio_iovcnt = 2; 602 cuio.uio_iov = cipher_iovecs; 603 cuio.uio_iovcnt = 3; 604 cuio.uio_segflg = UIO_SYSSPACE; 605 606 /* decrypt the keys and store the result in the output buffers */ 607 ret = zio_do_crypt_uio(B_FALSE, crypt, cwkey, NULL, iv, enc_len, 608 &puio, &cuio, (uint8_t *)aad, aad_len); 609 if (ret != 0) 610 goto error; 611 612 /* generate a fresh salt */ 613 ret = random_get_bytes(key->zk_salt, ZIO_DATA_SALT_LEN); 614 if (ret != 0) 615 goto error; 616 617 /* derive the current key from the master key */ 618 ret = hkdf_sha512(key->zk_master_keydata, keydata_len, NULL, 0, 619 key->zk_salt, ZIO_DATA_SALT_LEN, key->zk_current_keydata, 620 keydata_len); 621 if (ret != 0) 622 goto error; 623 624 /* initialize keys for ICP */ 625 key->zk_current_key.ck_data = key->zk_current_keydata; 626 key->zk_current_key.ck_length = CRYPTO_BYTES2BITS(keydata_len); 627 628 key->zk_hmac_key.ck_data = key->zk_hmac_keydata; 629 key->zk_hmac_key.ck_length = CRYPTO_BYTES2BITS(SHA512_HMAC_KEYLEN); 630 631 /* 632 * Initialize the crypto templates. It's ok if this fails because 633 * this is just an optimization. 634 */ 635 mech.cm_type = crypto_mech2id(zio_crypt_table[crypt].ci_mechname); 636 ret = crypto_create_ctx_template(&mech, &key->zk_current_key, 637 &key->zk_current_tmpl); 638 if (ret != CRYPTO_SUCCESS) 639 key->zk_current_tmpl = NULL; 640 641 mech.cm_type = crypto_mech2id(SUN_CKM_SHA512_HMAC); 642 ret = crypto_create_ctx_template(&mech, &key->zk_hmac_key, 643 &key->zk_hmac_tmpl); 644 if (ret != CRYPTO_SUCCESS) 645 key->zk_hmac_tmpl = NULL; 646 647 key->zk_crypt = crypt; 648 key->zk_version = version; 649 key->zk_guid = guid; 650 key->zk_salt_count = 0; 651 652 return (0); 653 654 error: 655 zio_crypt_key_destroy(key); 656 return (ret); 657 } 658 659 int 660 zio_crypt_generate_iv(uint8_t *ivbuf) 661 { 662 int ret; 663 664 /* randomly generate the IV */ 665 ret = random_get_pseudo_bytes(ivbuf, ZIO_DATA_IV_LEN); 666 if (ret != 0) 667 goto error; 668 669 return (0); 670 671 error: 672 memset(ivbuf, 0, ZIO_DATA_IV_LEN); 673 return (ret); 674 } 675 676 int 677 zio_crypt_do_hmac(zio_crypt_key_t *key, uint8_t *data, uint_t datalen, 678 uint8_t *digestbuf, uint_t digestlen) 679 { 680 int ret; 681 crypto_mechanism_t mech; 682 crypto_data_t in_data, digest_data; 683 uint8_t raw_digestbuf[SHA512_DIGEST_LENGTH]; 684 685 ASSERT3U(digestlen, <=, SHA512_DIGEST_LENGTH); 686 687 /* initialize sha512-hmac mechanism and crypto data */ 688 mech.cm_type = crypto_mech2id(SUN_CKM_SHA512_HMAC); 689 mech.cm_param = NULL; 690 mech.cm_param_len = 0; 691 692 /* initialize the crypto data */ 693 in_data.cd_format = CRYPTO_DATA_RAW; 694 in_data.cd_offset = 0; 695 in_data.cd_length = datalen; 696 in_data.cd_raw.iov_base = (char *)data; 697 in_data.cd_raw.iov_len = in_data.cd_length; 698 699 digest_data.cd_format = CRYPTO_DATA_RAW; 700 digest_data.cd_offset = 0; 701 digest_data.cd_length = SHA512_DIGEST_LENGTH; 702 digest_data.cd_raw.iov_base = (char *)raw_digestbuf; 703 digest_data.cd_raw.iov_len = digest_data.cd_length; 704 705 /* generate the hmac */ 706 ret = crypto_mac(&mech, &in_data, &key->zk_hmac_key, key->zk_hmac_tmpl, 707 &digest_data); 708 if (ret != CRYPTO_SUCCESS) { 709 ret = SET_ERROR(EIO); 710 goto error; 711 } 712 713 memcpy(digestbuf, raw_digestbuf, digestlen); 714 715 return (0); 716 717 error: 718 memset(digestbuf, 0, digestlen); 719 return (ret); 720 } 721 722 int 723 zio_crypt_generate_iv_salt_dedup(zio_crypt_key_t *key, uint8_t *data, 724 uint_t datalen, uint8_t *ivbuf, uint8_t *salt) 725 { 726 int ret; 727 uint8_t digestbuf[SHA512_DIGEST_LENGTH]; 728 729 ret = zio_crypt_do_hmac(key, data, datalen, 730 digestbuf, SHA512_DIGEST_LENGTH); 731 if (ret != 0) 732 return (ret); 733 734 memcpy(salt, digestbuf, ZIO_DATA_SALT_LEN); 735 memcpy(ivbuf, digestbuf + ZIO_DATA_SALT_LEN, ZIO_DATA_IV_LEN); 736 737 return (0); 738 } 739 740 /* 741 * The following functions are used to encode and decode encryption parameters 742 * into blkptr_t and zil_header_t. The ICP wants to use these parameters as 743 * byte strings, which normally means that these strings would not need to deal 744 * with byteswapping at all. However, both blkptr_t and zil_header_t may be 745 * byteswapped by lower layers and so we must "undo" that byteswap here upon 746 * decoding and encoding in a non-native byteorder. These functions require 747 * that the byteorder bit is correct before being called. 748 */ 749 void 750 zio_crypt_encode_params_bp(blkptr_t *bp, uint8_t *salt, uint8_t *iv) 751 { 752 uint64_t val64; 753 uint32_t val32; 754 755 ASSERT(BP_IS_ENCRYPTED(bp)); 756 757 if (!BP_SHOULD_BYTESWAP(bp)) { 758 memcpy(&bp->blk_dva[2].dva_word[0], salt, sizeof (uint64_t)); 759 memcpy(&bp->blk_dva[2].dva_word[1], iv, sizeof (uint64_t)); 760 memcpy(&val32, iv + sizeof (uint64_t), sizeof (uint32_t)); 761 BP_SET_IV2(bp, val32); 762 } else { 763 memcpy(&val64, salt, sizeof (uint64_t)); 764 bp->blk_dva[2].dva_word[0] = BSWAP_64(val64); 765 766 memcpy(&val64, iv, sizeof (uint64_t)); 767 bp->blk_dva[2].dva_word[1] = BSWAP_64(val64); 768 769 memcpy(&val32, iv + sizeof (uint64_t), sizeof (uint32_t)); 770 BP_SET_IV2(bp, BSWAP_32(val32)); 771 } 772 } 773 774 void 775 zio_crypt_decode_params_bp(const blkptr_t *bp, uint8_t *salt, uint8_t *iv) 776 { 777 uint64_t val64; 778 uint32_t val32; 779 780 ASSERT(BP_IS_PROTECTED(bp)); 781 782 /* for convenience, so callers don't need to check */ 783 if (BP_IS_AUTHENTICATED(bp)) { 784 memset(salt, 0, ZIO_DATA_SALT_LEN); 785 memset(iv, 0, ZIO_DATA_IV_LEN); 786 return; 787 } 788 789 if (!BP_SHOULD_BYTESWAP(bp)) { 790 memcpy(salt, &bp->blk_dva[2].dva_word[0], sizeof (uint64_t)); 791 memcpy(iv, &bp->blk_dva[2].dva_word[1], sizeof (uint64_t)); 792 793 val32 = (uint32_t)BP_GET_IV2(bp); 794 memcpy(iv + sizeof (uint64_t), &val32, sizeof (uint32_t)); 795 } else { 796 val64 = BSWAP_64(bp->blk_dva[2].dva_word[0]); 797 memcpy(salt, &val64, sizeof (uint64_t)); 798 799 val64 = BSWAP_64(bp->blk_dva[2].dva_word[1]); 800 memcpy(iv, &val64, sizeof (uint64_t)); 801 802 val32 = BSWAP_32((uint32_t)BP_GET_IV2(bp)); 803 memcpy(iv + sizeof (uint64_t), &val32, sizeof (uint32_t)); 804 } 805 } 806 807 void 808 zio_crypt_encode_mac_bp(blkptr_t *bp, uint8_t *mac) 809 { 810 uint64_t val64; 811 812 ASSERT(BP_USES_CRYPT(bp)); 813 ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_OBJSET); 814 815 if (!BP_SHOULD_BYTESWAP(bp)) { 816 memcpy(&bp->blk_cksum.zc_word[2], mac, sizeof (uint64_t)); 817 memcpy(&bp->blk_cksum.zc_word[3], mac + sizeof (uint64_t), 818 sizeof (uint64_t)); 819 } else { 820 memcpy(&val64, mac, sizeof (uint64_t)); 821 bp->blk_cksum.zc_word[2] = BSWAP_64(val64); 822 823 memcpy(&val64, mac + sizeof (uint64_t), sizeof (uint64_t)); 824 bp->blk_cksum.zc_word[3] = BSWAP_64(val64); 825 } 826 } 827 828 void 829 zio_crypt_decode_mac_bp(const blkptr_t *bp, uint8_t *mac) 830 { 831 uint64_t val64; 832 833 ASSERT(BP_USES_CRYPT(bp) || BP_IS_HOLE(bp)); 834 835 /* for convenience, so callers don't need to check */ 836 if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) { 837 memset(mac, 0, ZIO_DATA_MAC_LEN); 838 return; 839 } 840 841 if (!BP_SHOULD_BYTESWAP(bp)) { 842 memcpy(mac, &bp->blk_cksum.zc_word[2], sizeof (uint64_t)); 843 memcpy(mac + sizeof (uint64_t), &bp->blk_cksum.zc_word[3], 844 sizeof (uint64_t)); 845 } else { 846 val64 = BSWAP_64(bp->blk_cksum.zc_word[2]); 847 memcpy(mac, &val64, sizeof (uint64_t)); 848 849 val64 = BSWAP_64(bp->blk_cksum.zc_word[3]); 850 memcpy(mac + sizeof (uint64_t), &val64, sizeof (uint64_t)); 851 } 852 } 853 854 void 855 zio_crypt_encode_mac_zil(void *data, uint8_t *mac) 856 { 857 zil_chain_t *zilc = data; 858 859 memcpy(&zilc->zc_eck.zec_cksum.zc_word[2], mac, sizeof (uint64_t)); 860 memcpy(&zilc->zc_eck.zec_cksum.zc_word[3], mac + sizeof (uint64_t), 861 sizeof (uint64_t)); 862 } 863 864 void 865 zio_crypt_decode_mac_zil(const void *data, uint8_t *mac) 866 { 867 /* 868 * The ZIL MAC is embedded in the block it protects, which will 869 * not have been byteswapped by the time this function has been called. 870 * As a result, we don't need to worry about byteswapping the MAC. 871 */ 872 const zil_chain_t *zilc = data; 873 874 memcpy(mac, &zilc->zc_eck.zec_cksum.zc_word[2], sizeof (uint64_t)); 875 memcpy(mac + sizeof (uint64_t), &zilc->zc_eck.zec_cksum.zc_word[3], 876 sizeof (uint64_t)); 877 } 878 879 /* 880 * This routine takes a block of dnodes (src_abd) and copies only the bonus 881 * buffers to the same offsets in the dst buffer. datalen should be the size 882 * of both the src_abd and the dst buffer (not just the length of the bonus 883 * buffers). 884 */ 885 void 886 zio_crypt_copy_dnode_bonus(abd_t *src_abd, uint8_t *dst, uint_t datalen) 887 { 888 uint_t i, max_dnp = datalen >> DNODE_SHIFT; 889 uint8_t *src; 890 dnode_phys_t *dnp, *sdnp, *ddnp; 891 892 src = abd_borrow_buf_copy(src_abd, datalen); 893 894 sdnp = (dnode_phys_t *)src; 895 ddnp = (dnode_phys_t *)dst; 896 897 for (i = 0; i < max_dnp; i += sdnp[i].dn_extra_slots + 1) { 898 dnp = &sdnp[i]; 899 if (dnp->dn_type != DMU_OT_NONE && 900 DMU_OT_IS_ENCRYPTED(dnp->dn_bonustype) && 901 dnp->dn_bonuslen != 0) { 902 memcpy(DN_BONUS(&ddnp[i]), DN_BONUS(dnp), 903 DN_MAX_BONUS_LEN(dnp)); 904 } 905 } 906 907 abd_return_buf(src_abd, src, datalen); 908 } 909 910 /* 911 * This function decides what fields from blk_prop are included in 912 * the on-disk various MAC algorithms. 913 */ 914 static void 915 zio_crypt_bp_zero_nonportable_blkprop(blkptr_t *bp, uint64_t version) 916 { 917 /* 918 * Version 0 did not properly zero out all non-portable fields 919 * as it should have done. We maintain this code so that we can 920 * do read-only imports of pools on this version. 921 */ 922 if (version == 0) { 923 BP_SET_DEDUP(bp, 0); 924 BP_SET_CHECKSUM(bp, 0); 925 BP_SET_PSIZE(bp, SPA_MINBLOCKSIZE); 926 return; 927 } 928 929 ASSERT3U(version, ==, ZIO_CRYPT_KEY_CURRENT_VERSION); 930 931 /* 932 * The hole_birth feature might set these fields even if this bp 933 * is a hole. We zero them out here to guarantee that raw sends 934 * will function with or without the feature. 935 */ 936 if (BP_IS_HOLE(bp)) { 937 bp->blk_prop = 0ULL; 938 return; 939 } 940 941 /* 942 * At L0 we want to verify these fields to ensure that data blocks 943 * can not be reinterpreted. For instance, we do not want an attacker 944 * to trick us into returning raw lz4 compressed data to the user 945 * by modifying the compression bits. At higher levels, we cannot 946 * enforce this policy since raw sends do not convey any information 947 * about indirect blocks, so these values might be different on the 948 * receive side. Fortunately, this does not open any new attack 949 * vectors, since any alterations that can be made to a higher level 950 * bp must still verify the correct order of the layer below it. 951 */ 952 if (BP_GET_LEVEL(bp) != 0) { 953 BP_SET_BYTEORDER(bp, 0); 954 BP_SET_COMPRESS(bp, 0); 955 956 /* 957 * psize cannot be set to zero or it will trigger 958 * asserts, but the value doesn't really matter as 959 * long as it is constant. 960 */ 961 BP_SET_PSIZE(bp, SPA_MINBLOCKSIZE); 962 } 963 964 BP_SET_DEDUP(bp, 0); 965 BP_SET_CHECKSUM(bp, 0); 966 } 967 968 static void 969 zio_crypt_bp_auth_init(uint64_t version, boolean_t should_bswap, blkptr_t *bp, 970 blkptr_auth_buf_t *bab, uint_t *bab_len) 971 { 972 blkptr_t tmpbp = *bp; 973 974 if (should_bswap) 975 byteswap_uint64_array(&tmpbp, sizeof (blkptr_t)); 976 977 ASSERT(BP_USES_CRYPT(&tmpbp) || BP_IS_HOLE(&tmpbp)); 978 ASSERT0(BP_IS_EMBEDDED(&tmpbp)); 979 980 zio_crypt_decode_mac_bp(&tmpbp, bab->bab_mac); 981 982 /* 983 * We always MAC blk_prop in LE to ensure portability. This 984 * must be done after decoding the mac, since the endianness 985 * will get zero'd out here. 986 */ 987 zio_crypt_bp_zero_nonportable_blkprop(&tmpbp, version); 988 bab->bab_prop = LE_64(tmpbp.blk_prop); 989 bab->bab_pad = 0ULL; 990 991 /* version 0 did not include the padding */ 992 *bab_len = sizeof (blkptr_auth_buf_t); 993 if (version == 0) 994 *bab_len -= sizeof (uint64_t); 995 } 996 997 static int 998 zio_crypt_bp_do_hmac_updates(crypto_context_t ctx, uint64_t version, 999 boolean_t should_bswap, blkptr_t *bp) 1000 { 1001 int ret; 1002 uint_t bab_len; 1003 blkptr_auth_buf_t bab; 1004 crypto_data_t cd; 1005 1006 zio_crypt_bp_auth_init(version, should_bswap, bp, &bab, &bab_len); 1007 cd.cd_format = CRYPTO_DATA_RAW; 1008 cd.cd_offset = 0; 1009 cd.cd_length = bab_len; 1010 cd.cd_raw.iov_base = (char *)&bab; 1011 cd.cd_raw.iov_len = cd.cd_length; 1012 1013 ret = crypto_mac_update(ctx, &cd); 1014 if (ret != CRYPTO_SUCCESS) { 1015 ret = SET_ERROR(EIO); 1016 goto error; 1017 } 1018 1019 return (0); 1020 1021 error: 1022 return (ret); 1023 } 1024 1025 static void 1026 zio_crypt_bp_do_indrect_checksum_updates(SHA2_CTX *ctx, uint64_t version, 1027 boolean_t should_bswap, blkptr_t *bp) 1028 { 1029 uint_t bab_len; 1030 blkptr_auth_buf_t bab; 1031 1032 zio_crypt_bp_auth_init(version, should_bswap, bp, &bab, &bab_len); 1033 SHA2Update(ctx, &bab, bab_len); 1034 } 1035 1036 static void 1037 zio_crypt_bp_do_aad_updates(uint8_t **aadp, uint_t *aad_len, uint64_t version, 1038 boolean_t should_bswap, blkptr_t *bp) 1039 { 1040 uint_t bab_len; 1041 blkptr_auth_buf_t bab; 1042 1043 zio_crypt_bp_auth_init(version, should_bswap, bp, &bab, &bab_len); 1044 memcpy(*aadp, &bab, bab_len); 1045 *aadp += bab_len; 1046 *aad_len += bab_len; 1047 } 1048 1049 static int 1050 zio_crypt_do_dnode_hmac_updates(crypto_context_t ctx, uint64_t version, 1051 boolean_t should_bswap, dnode_phys_t *dnp) 1052 { 1053 int ret, i; 1054 dnode_phys_t *adnp, tmp_dncore; 1055 size_t dn_core_size = offsetof(dnode_phys_t, dn_blkptr); 1056 boolean_t le_bswap = (should_bswap == ZFS_HOST_BYTEORDER); 1057 crypto_data_t cd; 1058 1059 cd.cd_format = CRYPTO_DATA_RAW; 1060 cd.cd_offset = 0; 1061 1062 /* 1063 * Authenticate the core dnode (masking out non-portable bits). 1064 * We only copy the first 64 bytes we operate on to avoid the overhead 1065 * of copying 512-64 unneeded bytes. The compiler seems to be fine 1066 * with that. 1067 */ 1068 memcpy(&tmp_dncore, dnp, dn_core_size); 1069 adnp = &tmp_dncore; 1070 1071 if (le_bswap) { 1072 adnp->dn_datablkszsec = BSWAP_16(adnp->dn_datablkszsec); 1073 adnp->dn_bonuslen = BSWAP_16(adnp->dn_bonuslen); 1074 adnp->dn_maxblkid = BSWAP_64(adnp->dn_maxblkid); 1075 adnp->dn_used = BSWAP_64(adnp->dn_used); 1076 } 1077 adnp->dn_flags &= DNODE_CRYPT_PORTABLE_FLAGS_MASK; 1078 adnp->dn_used = 0; 1079 1080 cd.cd_length = dn_core_size; 1081 cd.cd_raw.iov_base = (char *)adnp; 1082 cd.cd_raw.iov_len = cd.cd_length; 1083 1084 ret = crypto_mac_update(ctx, &cd); 1085 if (ret != CRYPTO_SUCCESS) { 1086 ret = SET_ERROR(EIO); 1087 goto error; 1088 } 1089 1090 for (i = 0; i < dnp->dn_nblkptr; i++) { 1091 ret = zio_crypt_bp_do_hmac_updates(ctx, version, 1092 should_bswap, &dnp->dn_blkptr[i]); 1093 if (ret != 0) 1094 goto error; 1095 } 1096 1097 if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) { 1098 ret = zio_crypt_bp_do_hmac_updates(ctx, version, 1099 should_bswap, DN_SPILL_BLKPTR(dnp)); 1100 if (ret != 0) 1101 goto error; 1102 } 1103 1104 return (0); 1105 1106 error: 1107 return (ret); 1108 } 1109 1110 /* 1111 * objset_phys_t blocks introduce a number of exceptions to the normal 1112 * authentication process. objset_phys_t's contain 2 separate HMACS for 1113 * protecting the integrity of their data. The portable_mac protects the 1114 * metadnode. This MAC can be sent with a raw send and protects against 1115 * reordering of data within the metadnode. The local_mac protects the user 1116 * accounting objects which are not sent from one system to another. 1117 * 1118 * In addition, objset blocks are the only blocks that can be modified and 1119 * written to disk without the key loaded under certain circumstances. During 1120 * zil_claim() we need to be able to update the zil_header_t to complete 1121 * claiming log blocks and during raw receives we need to write out the 1122 * portable_mac from the send file. Both of these actions are possible 1123 * because these fields are not protected by either MAC so neither one will 1124 * need to modify the MACs without the key. However, when the modified blocks 1125 * are written out they will be byteswapped into the host machine's native 1126 * endianness which will modify fields protected by the MAC. As a result, MAC 1127 * calculation for objset blocks works slightly differently from other block 1128 * types. Where other block types MAC the data in whatever endianness is 1129 * written to disk, objset blocks always MAC little endian version of their 1130 * values. In the code, should_bswap is the value from BP_SHOULD_BYTESWAP() 1131 * and le_bswap indicates whether a byteswap is needed to get this block 1132 * into little endian format. 1133 */ 1134 int 1135 zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen, 1136 boolean_t should_bswap, uint8_t *portable_mac, uint8_t *local_mac) 1137 { 1138 int ret; 1139 crypto_mechanism_t mech; 1140 crypto_context_t ctx; 1141 crypto_data_t cd; 1142 objset_phys_t *osp = data; 1143 uint64_t intval; 1144 boolean_t le_bswap = (should_bswap == ZFS_HOST_BYTEORDER); 1145 uint8_t raw_portable_mac[SHA512_DIGEST_LENGTH]; 1146 uint8_t raw_local_mac[SHA512_DIGEST_LENGTH]; 1147 1148 /* initialize HMAC mechanism */ 1149 mech.cm_type = crypto_mech2id(SUN_CKM_SHA512_HMAC); 1150 mech.cm_param = NULL; 1151 mech.cm_param_len = 0; 1152 1153 cd.cd_format = CRYPTO_DATA_RAW; 1154 cd.cd_offset = 0; 1155 1156 /* calculate the portable MAC from the portable fields and metadnode */ 1157 ret = crypto_mac_init(&mech, &key->zk_hmac_key, NULL, &ctx); 1158 if (ret != CRYPTO_SUCCESS) { 1159 ret = SET_ERROR(EIO); 1160 goto error; 1161 } 1162 1163 /* add in the os_type */ 1164 intval = (le_bswap) ? osp->os_type : BSWAP_64(osp->os_type); 1165 cd.cd_length = sizeof (uint64_t); 1166 cd.cd_raw.iov_base = (char *)&intval; 1167 cd.cd_raw.iov_len = cd.cd_length; 1168 1169 ret = crypto_mac_update(ctx, &cd); 1170 if (ret != CRYPTO_SUCCESS) { 1171 ret = SET_ERROR(EIO); 1172 goto error; 1173 } 1174 1175 /* add in the portable os_flags */ 1176 intval = osp->os_flags; 1177 if (should_bswap) 1178 intval = BSWAP_64(intval); 1179 intval &= OBJSET_CRYPT_PORTABLE_FLAGS_MASK; 1180 if (!ZFS_HOST_BYTEORDER) 1181 intval = BSWAP_64(intval); 1182 1183 cd.cd_length = sizeof (uint64_t); 1184 cd.cd_raw.iov_base = (char *)&intval; 1185 cd.cd_raw.iov_len = cd.cd_length; 1186 1187 ret = crypto_mac_update(ctx, &cd); 1188 if (ret != CRYPTO_SUCCESS) { 1189 ret = SET_ERROR(EIO); 1190 goto error; 1191 } 1192 1193 /* add in fields from the metadnode */ 1194 ret = zio_crypt_do_dnode_hmac_updates(ctx, key->zk_version, 1195 should_bswap, &osp->os_meta_dnode); 1196 if (ret) 1197 goto error; 1198 1199 /* store the final digest in a temporary buffer and copy what we need */ 1200 cd.cd_length = SHA512_DIGEST_LENGTH; 1201 cd.cd_raw.iov_base = (char *)raw_portable_mac; 1202 cd.cd_raw.iov_len = cd.cd_length; 1203 1204 ret = crypto_mac_final(ctx, &cd); 1205 if (ret != CRYPTO_SUCCESS) { 1206 ret = SET_ERROR(EIO); 1207 goto error; 1208 } 1209 1210 memcpy(portable_mac, raw_portable_mac, ZIO_OBJSET_MAC_LEN); 1211 1212 /* 1213 * This is necessary here as we check next whether 1214 * OBJSET_FLAG_USERACCOUNTING_COMPLETE is set in order to 1215 * decide if the local_mac should be zeroed out. That flag will always 1216 * be set by dmu_objset_id_quota_upgrade_cb() and 1217 * dmu_objset_userspace_upgrade_cb() if useraccounting has been 1218 * completed. 1219 */ 1220 intval = osp->os_flags; 1221 if (should_bswap) 1222 intval = BSWAP_64(intval); 1223 boolean_t uacct_incomplete = 1224 !(intval & OBJSET_FLAG_USERACCOUNTING_COMPLETE); 1225 1226 /* 1227 * The local MAC protects the user, group and project accounting. 1228 * If these objects are not present, the local MAC is zeroed out. 1229 */ 1230 if (uacct_incomplete || 1231 (datalen >= OBJSET_PHYS_SIZE_V3 && 1232 osp->os_userused_dnode.dn_type == DMU_OT_NONE && 1233 osp->os_groupused_dnode.dn_type == DMU_OT_NONE && 1234 osp->os_projectused_dnode.dn_type == DMU_OT_NONE) || 1235 (datalen >= OBJSET_PHYS_SIZE_V2 && 1236 osp->os_userused_dnode.dn_type == DMU_OT_NONE && 1237 osp->os_groupused_dnode.dn_type == DMU_OT_NONE) || 1238 (datalen <= OBJSET_PHYS_SIZE_V1)) { 1239 memset(local_mac, 0, ZIO_OBJSET_MAC_LEN); 1240 return (0); 1241 } 1242 1243 /* calculate the local MAC from the userused and groupused dnodes */ 1244 ret = crypto_mac_init(&mech, &key->zk_hmac_key, NULL, &ctx); 1245 if (ret != CRYPTO_SUCCESS) { 1246 ret = SET_ERROR(EIO); 1247 goto error; 1248 } 1249 1250 /* add in the non-portable os_flags */ 1251 intval = osp->os_flags; 1252 if (should_bswap) 1253 intval = BSWAP_64(intval); 1254 intval &= ~OBJSET_CRYPT_PORTABLE_FLAGS_MASK; 1255 if (!ZFS_HOST_BYTEORDER) 1256 intval = BSWAP_64(intval); 1257 1258 cd.cd_length = sizeof (uint64_t); 1259 cd.cd_raw.iov_base = (char *)&intval; 1260 cd.cd_raw.iov_len = cd.cd_length; 1261 1262 ret = crypto_mac_update(ctx, &cd); 1263 if (ret != CRYPTO_SUCCESS) { 1264 ret = SET_ERROR(EIO); 1265 goto error; 1266 } 1267 1268 /* add in fields from the user accounting dnodes */ 1269 if (osp->os_userused_dnode.dn_type != DMU_OT_NONE) { 1270 ret = zio_crypt_do_dnode_hmac_updates(ctx, key->zk_version, 1271 should_bswap, &osp->os_userused_dnode); 1272 if (ret) 1273 goto error; 1274 } 1275 1276 if (osp->os_groupused_dnode.dn_type != DMU_OT_NONE) { 1277 ret = zio_crypt_do_dnode_hmac_updates(ctx, key->zk_version, 1278 should_bswap, &osp->os_groupused_dnode); 1279 if (ret) 1280 goto error; 1281 } 1282 1283 if (osp->os_projectused_dnode.dn_type != DMU_OT_NONE && 1284 datalen >= OBJSET_PHYS_SIZE_V3) { 1285 ret = zio_crypt_do_dnode_hmac_updates(ctx, key->zk_version, 1286 should_bswap, &osp->os_projectused_dnode); 1287 if (ret) 1288 goto error; 1289 } 1290 1291 /* store the final digest in a temporary buffer and copy what we need */ 1292 cd.cd_length = SHA512_DIGEST_LENGTH; 1293 cd.cd_raw.iov_base = (char *)raw_local_mac; 1294 cd.cd_raw.iov_len = cd.cd_length; 1295 1296 ret = crypto_mac_final(ctx, &cd); 1297 if (ret != CRYPTO_SUCCESS) { 1298 ret = SET_ERROR(EIO); 1299 goto error; 1300 } 1301 1302 memcpy(local_mac, raw_local_mac, ZIO_OBJSET_MAC_LEN); 1303 1304 return (0); 1305 1306 error: 1307 memset(portable_mac, 0, ZIO_OBJSET_MAC_LEN); 1308 memset(local_mac, 0, ZIO_OBJSET_MAC_LEN); 1309 return (ret); 1310 } 1311 1312 static void 1313 zio_crypt_destroy_uio(zfs_uio_t *uio) 1314 { 1315 if (uio->uio_iov) 1316 kmem_free(uio->uio_iov, uio->uio_iovcnt * sizeof (iovec_t)); 1317 } 1318 1319 /* 1320 * This function parses an uncompressed indirect block and returns a checksum 1321 * of all the portable fields from all of the contained bps. The portable 1322 * fields are the MAC and all of the fields from blk_prop except for the dedup, 1323 * checksum, and psize bits. For an explanation of the purpose of this, see 1324 * the comment block on object set authentication. 1325 */ 1326 static int 1327 zio_crypt_do_indirect_mac_checksum_impl(boolean_t generate, void *buf, 1328 uint_t datalen, uint64_t version, boolean_t byteswap, uint8_t *cksum) 1329 { 1330 blkptr_t *bp; 1331 int i, epb = datalen >> SPA_BLKPTRSHIFT; 1332 SHA2_CTX ctx; 1333 uint8_t digestbuf[SHA512_DIGEST_LENGTH]; 1334 1335 /* checksum all of the MACs from the layer below */ 1336 SHA2Init(SHA512, &ctx); 1337 for (i = 0, bp = buf; i < epb; i++, bp++) { 1338 zio_crypt_bp_do_indrect_checksum_updates(&ctx, version, 1339 byteswap, bp); 1340 } 1341 SHA2Final(digestbuf, &ctx); 1342 1343 if (generate) { 1344 memcpy(cksum, digestbuf, ZIO_DATA_MAC_LEN); 1345 return (0); 1346 } 1347 1348 if (memcmp(digestbuf, cksum, ZIO_DATA_MAC_LEN) != 0) 1349 return (SET_ERROR(ECKSUM)); 1350 1351 return (0); 1352 } 1353 1354 int 1355 zio_crypt_do_indirect_mac_checksum(boolean_t generate, void *buf, 1356 uint_t datalen, boolean_t byteswap, uint8_t *cksum) 1357 { 1358 int ret; 1359 1360 /* 1361 * Unfortunately, callers of this function will not always have 1362 * easy access to the on-disk format version. This info is 1363 * normally found in the DSL Crypto Key, but the checksum-of-MACs 1364 * is expected to be verifiable even when the key isn't loaded. 1365 * Here, instead of doing a ZAP lookup for the version for each 1366 * zio, we simply try both existing formats. 1367 */ 1368 ret = zio_crypt_do_indirect_mac_checksum_impl(generate, buf, 1369 datalen, ZIO_CRYPT_KEY_CURRENT_VERSION, byteswap, cksum); 1370 if (ret == ECKSUM) { 1371 ASSERT(!generate); 1372 ret = zio_crypt_do_indirect_mac_checksum_impl(generate, 1373 buf, datalen, 0, byteswap, cksum); 1374 } 1375 1376 return (ret); 1377 } 1378 1379 int 1380 zio_crypt_do_indirect_mac_checksum_abd(boolean_t generate, abd_t *abd, 1381 uint_t datalen, boolean_t byteswap, uint8_t *cksum) 1382 { 1383 int ret; 1384 void *buf; 1385 1386 buf = abd_borrow_buf_copy(abd, datalen); 1387 ret = zio_crypt_do_indirect_mac_checksum(generate, buf, datalen, 1388 byteswap, cksum); 1389 abd_return_buf(abd, buf, datalen); 1390 1391 return (ret); 1392 } 1393 1394 /* 1395 * Special case handling routine for encrypting / decrypting ZIL blocks. 1396 * We do not check for the older ZIL chain because the encryption feature 1397 * was not available before the newer ZIL chain was introduced. The goal 1398 * here is to encrypt everything except the blkptr_t of a lr_write_t and 1399 * the zil_chain_t header. Everything that is not encrypted is authenticated. 1400 */ 1401 static int 1402 zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf, 1403 uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap, zfs_uio_t *puio, 1404 zfs_uio_t *cuio, uint_t *enc_len, uint8_t **authbuf, uint_t *auth_len, 1405 boolean_t *no_crypt) 1406 { 1407 int ret; 1408 uint64_t txtype, lr_len; 1409 uint_t nr_src, nr_dst, crypt_len; 1410 uint_t aad_len = 0, nr_iovecs = 0, total_len = 0; 1411 iovec_t *src_iovecs = NULL, *dst_iovecs = NULL; 1412 uint8_t *src, *dst, *slrp, *dlrp, *blkend, *aadp; 1413 zil_chain_t *zilc; 1414 lr_t *lr; 1415 uint8_t *aadbuf = zio_buf_alloc(datalen); 1416 1417 /* cipherbuf always needs an extra iovec for the MAC */ 1418 if (encrypt) { 1419 src = plainbuf; 1420 dst = cipherbuf; 1421 nr_src = 0; 1422 nr_dst = 1; 1423 } else { 1424 src = cipherbuf; 1425 dst = plainbuf; 1426 nr_src = 1; 1427 nr_dst = 0; 1428 } 1429 memset(dst, 0, datalen); 1430 1431 /* find the start and end record of the log block */ 1432 zilc = (zil_chain_t *)src; 1433 slrp = src + sizeof (zil_chain_t); 1434 aadp = aadbuf; 1435 blkend = src + ((byteswap) ? BSWAP_64(zilc->zc_nused) : zilc->zc_nused); 1436 1437 /* calculate the number of encrypted iovecs we will need */ 1438 for (; slrp < blkend; slrp += lr_len) { 1439 lr = (lr_t *)slrp; 1440 1441 if (!byteswap) { 1442 txtype = lr->lrc_txtype; 1443 lr_len = lr->lrc_reclen; 1444 } else { 1445 txtype = BSWAP_64(lr->lrc_txtype); 1446 lr_len = BSWAP_64(lr->lrc_reclen); 1447 } 1448 1449 nr_iovecs++; 1450 if (txtype == TX_WRITE && lr_len != sizeof (lr_write_t)) 1451 nr_iovecs++; 1452 } 1453 1454 nr_src += nr_iovecs; 1455 nr_dst += nr_iovecs; 1456 1457 /* allocate the iovec arrays */ 1458 if (nr_src != 0) { 1459 src_iovecs = kmem_alloc(nr_src * sizeof (iovec_t), KM_SLEEP); 1460 if (src_iovecs == NULL) { 1461 ret = SET_ERROR(ENOMEM); 1462 goto error; 1463 } 1464 } 1465 1466 if (nr_dst != 0) { 1467 dst_iovecs = kmem_alloc(nr_dst * sizeof (iovec_t), KM_SLEEP); 1468 if (dst_iovecs == NULL) { 1469 ret = SET_ERROR(ENOMEM); 1470 goto error; 1471 } 1472 } 1473 1474 /* 1475 * Copy the plain zil header over and authenticate everything except 1476 * the checksum that will store our MAC. If we are writing the data 1477 * the embedded checksum will not have been calculated yet, so we don't 1478 * authenticate that. 1479 */ 1480 memcpy(dst, src, sizeof (zil_chain_t)); 1481 memcpy(aadp, src, sizeof (zil_chain_t) - sizeof (zio_eck_t)); 1482 aadp += sizeof (zil_chain_t) - sizeof (zio_eck_t); 1483 aad_len += sizeof (zil_chain_t) - sizeof (zio_eck_t); 1484 1485 /* loop over records again, filling in iovecs */ 1486 nr_iovecs = 0; 1487 slrp = src + sizeof (zil_chain_t); 1488 dlrp = dst + sizeof (zil_chain_t); 1489 1490 for (; slrp < blkend; slrp += lr_len, dlrp += lr_len) { 1491 lr = (lr_t *)slrp; 1492 1493 if (!byteswap) { 1494 txtype = lr->lrc_txtype; 1495 lr_len = lr->lrc_reclen; 1496 } else { 1497 txtype = BSWAP_64(lr->lrc_txtype); 1498 lr_len = BSWAP_64(lr->lrc_reclen); 1499 } 1500 1501 /* copy the common lr_t */ 1502 memcpy(dlrp, slrp, sizeof (lr_t)); 1503 memcpy(aadp, slrp, sizeof (lr_t)); 1504 aadp += sizeof (lr_t); 1505 aad_len += sizeof (lr_t); 1506 1507 ASSERT3P(src_iovecs, !=, NULL); 1508 ASSERT3P(dst_iovecs, !=, NULL); 1509 1510 /* 1511 * If this is a TX_WRITE record we want to encrypt everything 1512 * except the bp if exists. If the bp does exist we want to 1513 * authenticate it. 1514 */ 1515 if (txtype == TX_WRITE) { 1516 crypt_len = sizeof (lr_write_t) - 1517 sizeof (lr_t) - sizeof (blkptr_t); 1518 src_iovecs[nr_iovecs].iov_base = slrp + sizeof (lr_t); 1519 src_iovecs[nr_iovecs].iov_len = crypt_len; 1520 dst_iovecs[nr_iovecs].iov_base = dlrp + sizeof (lr_t); 1521 dst_iovecs[nr_iovecs].iov_len = crypt_len; 1522 1523 /* copy the bp now since it will not be encrypted */ 1524 memcpy(dlrp + sizeof (lr_write_t) - sizeof (blkptr_t), 1525 slrp + sizeof (lr_write_t) - sizeof (blkptr_t), 1526 sizeof (blkptr_t)); 1527 memcpy(aadp, 1528 slrp + sizeof (lr_write_t) - sizeof (blkptr_t), 1529 sizeof (blkptr_t)); 1530 aadp += sizeof (blkptr_t); 1531 aad_len += sizeof (blkptr_t); 1532 nr_iovecs++; 1533 total_len += crypt_len; 1534 1535 if (lr_len != sizeof (lr_write_t)) { 1536 crypt_len = lr_len - sizeof (lr_write_t); 1537 src_iovecs[nr_iovecs].iov_base = 1538 slrp + sizeof (lr_write_t); 1539 src_iovecs[nr_iovecs].iov_len = crypt_len; 1540 dst_iovecs[nr_iovecs].iov_base = 1541 dlrp + sizeof (lr_write_t); 1542 dst_iovecs[nr_iovecs].iov_len = crypt_len; 1543 nr_iovecs++; 1544 total_len += crypt_len; 1545 } 1546 } else { 1547 crypt_len = lr_len - sizeof (lr_t); 1548 src_iovecs[nr_iovecs].iov_base = slrp + sizeof (lr_t); 1549 src_iovecs[nr_iovecs].iov_len = crypt_len; 1550 dst_iovecs[nr_iovecs].iov_base = dlrp + sizeof (lr_t); 1551 dst_iovecs[nr_iovecs].iov_len = crypt_len; 1552 nr_iovecs++; 1553 total_len += crypt_len; 1554 } 1555 } 1556 1557 *no_crypt = (nr_iovecs == 0); 1558 *enc_len = total_len; 1559 *authbuf = aadbuf; 1560 *auth_len = aad_len; 1561 1562 if (encrypt) { 1563 puio->uio_iov = src_iovecs; 1564 puio->uio_iovcnt = nr_src; 1565 cuio->uio_iov = dst_iovecs; 1566 cuio->uio_iovcnt = nr_dst; 1567 } else { 1568 puio->uio_iov = dst_iovecs; 1569 puio->uio_iovcnt = nr_dst; 1570 cuio->uio_iov = src_iovecs; 1571 cuio->uio_iovcnt = nr_src; 1572 } 1573 1574 return (0); 1575 1576 error: 1577 zio_buf_free(aadbuf, datalen); 1578 if (src_iovecs != NULL) 1579 kmem_free(src_iovecs, nr_src * sizeof (iovec_t)); 1580 if (dst_iovecs != NULL) 1581 kmem_free(dst_iovecs, nr_dst * sizeof (iovec_t)); 1582 1583 *enc_len = 0; 1584 *authbuf = NULL; 1585 *auth_len = 0; 1586 *no_crypt = B_FALSE; 1587 puio->uio_iov = NULL; 1588 puio->uio_iovcnt = 0; 1589 cuio->uio_iov = NULL; 1590 cuio->uio_iovcnt = 0; 1591 return (ret); 1592 } 1593 1594 /* 1595 * Special case handling routine for encrypting / decrypting dnode blocks. 1596 */ 1597 static int 1598 zio_crypt_init_uios_dnode(boolean_t encrypt, uint64_t version, 1599 uint8_t *plainbuf, uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap, 1600 zfs_uio_t *puio, zfs_uio_t *cuio, uint_t *enc_len, uint8_t **authbuf, 1601 uint_t *auth_len, boolean_t *no_crypt) 1602 { 1603 int ret; 1604 uint_t nr_src, nr_dst, crypt_len; 1605 uint_t aad_len = 0, nr_iovecs = 0, total_len = 0; 1606 uint_t i, j, max_dnp = datalen >> DNODE_SHIFT; 1607 iovec_t *src_iovecs = NULL, *dst_iovecs = NULL; 1608 uint8_t *src, *dst, *aadp; 1609 dnode_phys_t *dnp, *adnp, *sdnp, *ddnp; 1610 uint8_t *aadbuf = zio_buf_alloc(datalen); 1611 1612 if (encrypt) { 1613 src = plainbuf; 1614 dst = cipherbuf; 1615 nr_src = 0; 1616 nr_dst = 1; 1617 } else { 1618 src = cipherbuf; 1619 dst = plainbuf; 1620 nr_src = 1; 1621 nr_dst = 0; 1622 } 1623 1624 sdnp = (dnode_phys_t *)src; 1625 ddnp = (dnode_phys_t *)dst; 1626 aadp = aadbuf; 1627 1628 /* 1629 * Count the number of iovecs we will need to do the encryption by 1630 * counting the number of bonus buffers that need to be encrypted. 1631 */ 1632 for (i = 0; i < max_dnp; i += sdnp[i].dn_extra_slots + 1) { 1633 /* 1634 * This block may still be byteswapped. However, all of the 1635 * values we use are either uint8_t's (for which byteswapping 1636 * is a noop) or a * != 0 check, which will work regardless 1637 * of whether or not we byteswap. 1638 */ 1639 if (sdnp[i].dn_type != DMU_OT_NONE && 1640 DMU_OT_IS_ENCRYPTED(sdnp[i].dn_bonustype) && 1641 sdnp[i].dn_bonuslen != 0) { 1642 nr_iovecs++; 1643 } 1644 } 1645 1646 nr_src += nr_iovecs; 1647 nr_dst += nr_iovecs; 1648 1649 if (nr_src != 0) { 1650 src_iovecs = kmem_alloc(nr_src * sizeof (iovec_t), KM_SLEEP); 1651 if (src_iovecs == NULL) { 1652 ret = SET_ERROR(ENOMEM); 1653 goto error; 1654 } 1655 } 1656 1657 if (nr_dst != 0) { 1658 dst_iovecs = kmem_alloc(nr_dst * sizeof (iovec_t), KM_SLEEP); 1659 if (dst_iovecs == NULL) { 1660 ret = SET_ERROR(ENOMEM); 1661 goto error; 1662 } 1663 } 1664 1665 nr_iovecs = 0; 1666 1667 /* 1668 * Iterate through the dnodes again, this time filling in the uios 1669 * we allocated earlier. We also concatenate any data we want to 1670 * authenticate onto aadbuf. 1671 */ 1672 for (i = 0; i < max_dnp; i += sdnp[i].dn_extra_slots + 1) { 1673 dnp = &sdnp[i]; 1674 1675 /* copy over the core fields and blkptrs (kept as plaintext) */ 1676 memcpy(&ddnp[i], dnp, 1677 (uint8_t *)DN_BONUS(dnp) - (uint8_t *)dnp); 1678 1679 if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) { 1680 memcpy(DN_SPILL_BLKPTR(&ddnp[i]), DN_SPILL_BLKPTR(dnp), 1681 sizeof (blkptr_t)); 1682 } 1683 1684 /* 1685 * Handle authenticated data. We authenticate everything in 1686 * the dnode that can be brought over when we do a raw send. 1687 * This includes all of the core fields as well as the MACs 1688 * stored in the bp checksums and all of the portable bits 1689 * from blk_prop. We include the dnode padding here in case it 1690 * ever gets used in the future. Some dn_flags and dn_used are 1691 * not portable so we mask those out values out of the 1692 * authenticated data. 1693 */ 1694 crypt_len = offsetof(dnode_phys_t, dn_blkptr); 1695 memcpy(aadp, dnp, crypt_len); 1696 adnp = (dnode_phys_t *)aadp; 1697 adnp->dn_flags &= DNODE_CRYPT_PORTABLE_FLAGS_MASK; 1698 adnp->dn_used = 0; 1699 aadp += crypt_len; 1700 aad_len += crypt_len; 1701 1702 for (j = 0; j < dnp->dn_nblkptr; j++) { 1703 zio_crypt_bp_do_aad_updates(&aadp, &aad_len, 1704 version, byteswap, &dnp->dn_blkptr[j]); 1705 } 1706 1707 if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) { 1708 zio_crypt_bp_do_aad_updates(&aadp, &aad_len, 1709 version, byteswap, DN_SPILL_BLKPTR(dnp)); 1710 } 1711 1712 /* 1713 * If this bonus buffer needs to be encrypted, we prepare an 1714 * iovec_t. The encryption / decryption functions will fill 1715 * this in for us with the encrypted or decrypted data. 1716 * Otherwise we add the bonus buffer to the authenticated 1717 * data buffer and copy it over to the destination. The 1718 * encrypted iovec extends to DN_MAX_BONUS_LEN(dnp) so that 1719 * we can guarantee alignment with the AES block size 1720 * (128 bits). 1721 */ 1722 crypt_len = DN_MAX_BONUS_LEN(dnp); 1723 if (dnp->dn_type != DMU_OT_NONE && 1724 DMU_OT_IS_ENCRYPTED(dnp->dn_bonustype) && 1725 dnp->dn_bonuslen != 0) { 1726 ASSERT3U(nr_iovecs, <, nr_src); 1727 ASSERT3U(nr_iovecs, <, nr_dst); 1728 ASSERT3P(src_iovecs, !=, NULL); 1729 ASSERT3P(dst_iovecs, !=, NULL); 1730 src_iovecs[nr_iovecs].iov_base = DN_BONUS(dnp); 1731 src_iovecs[nr_iovecs].iov_len = crypt_len; 1732 dst_iovecs[nr_iovecs].iov_base = DN_BONUS(&ddnp[i]); 1733 dst_iovecs[nr_iovecs].iov_len = crypt_len; 1734 1735 nr_iovecs++; 1736 total_len += crypt_len; 1737 } else { 1738 memcpy(DN_BONUS(&ddnp[i]), DN_BONUS(dnp), crypt_len); 1739 memcpy(aadp, DN_BONUS(dnp), crypt_len); 1740 aadp += crypt_len; 1741 aad_len += crypt_len; 1742 } 1743 } 1744 1745 *no_crypt = (nr_iovecs == 0); 1746 *enc_len = total_len; 1747 *authbuf = aadbuf; 1748 *auth_len = aad_len; 1749 1750 if (encrypt) { 1751 puio->uio_iov = src_iovecs; 1752 puio->uio_iovcnt = nr_src; 1753 cuio->uio_iov = dst_iovecs; 1754 cuio->uio_iovcnt = nr_dst; 1755 } else { 1756 puio->uio_iov = dst_iovecs; 1757 puio->uio_iovcnt = nr_dst; 1758 cuio->uio_iov = src_iovecs; 1759 cuio->uio_iovcnt = nr_src; 1760 } 1761 1762 return (0); 1763 1764 error: 1765 zio_buf_free(aadbuf, datalen); 1766 if (src_iovecs != NULL) 1767 kmem_free(src_iovecs, nr_src * sizeof (iovec_t)); 1768 if (dst_iovecs != NULL) 1769 kmem_free(dst_iovecs, nr_dst * sizeof (iovec_t)); 1770 1771 *enc_len = 0; 1772 *authbuf = NULL; 1773 *auth_len = 0; 1774 *no_crypt = B_FALSE; 1775 puio->uio_iov = NULL; 1776 puio->uio_iovcnt = 0; 1777 cuio->uio_iov = NULL; 1778 cuio->uio_iovcnt = 0; 1779 return (ret); 1780 } 1781 1782 static int 1783 zio_crypt_init_uios_normal(boolean_t encrypt, uint8_t *plainbuf, 1784 uint8_t *cipherbuf, uint_t datalen, zfs_uio_t *puio, zfs_uio_t *cuio, 1785 uint_t *enc_len) 1786 { 1787 (void) encrypt; 1788 int ret; 1789 uint_t nr_plain = 1, nr_cipher = 2; 1790 iovec_t *plain_iovecs = NULL, *cipher_iovecs = NULL; 1791 1792 /* allocate the iovecs for the plain and cipher data */ 1793 plain_iovecs = kmem_alloc(nr_plain * sizeof (iovec_t), 1794 KM_SLEEP); 1795 if (!plain_iovecs) { 1796 ret = SET_ERROR(ENOMEM); 1797 goto error; 1798 } 1799 1800 cipher_iovecs = kmem_alloc(nr_cipher * sizeof (iovec_t), 1801 KM_SLEEP); 1802 if (!cipher_iovecs) { 1803 ret = SET_ERROR(ENOMEM); 1804 goto error; 1805 } 1806 1807 plain_iovecs[0].iov_base = plainbuf; 1808 plain_iovecs[0].iov_len = datalen; 1809 cipher_iovecs[0].iov_base = cipherbuf; 1810 cipher_iovecs[0].iov_len = datalen; 1811 1812 *enc_len = datalen; 1813 puio->uio_iov = plain_iovecs; 1814 puio->uio_iovcnt = nr_plain; 1815 cuio->uio_iov = cipher_iovecs; 1816 cuio->uio_iovcnt = nr_cipher; 1817 1818 return (0); 1819 1820 error: 1821 if (plain_iovecs != NULL) 1822 kmem_free(plain_iovecs, nr_plain * sizeof (iovec_t)); 1823 if (cipher_iovecs != NULL) 1824 kmem_free(cipher_iovecs, nr_cipher * sizeof (iovec_t)); 1825 1826 *enc_len = 0; 1827 puio->uio_iov = NULL; 1828 puio->uio_iovcnt = 0; 1829 cuio->uio_iov = NULL; 1830 cuio->uio_iovcnt = 0; 1831 return (ret); 1832 } 1833 1834 /* 1835 * This function builds up the plaintext (puio) and ciphertext (cuio) uios so 1836 * that they can be used for encryption and decryption by zio_do_crypt_uio(). 1837 * Most blocks will use zio_crypt_init_uios_normal(), with ZIL and dnode blocks 1838 * requiring special handling to parse out pieces that are to be encrypted. The 1839 * authbuf is used by these special cases to store additional authenticated 1840 * data (AAD) for the encryption modes. 1841 */ 1842 static int 1843 zio_crypt_init_uios(boolean_t encrypt, uint64_t version, dmu_object_type_t ot, 1844 uint8_t *plainbuf, uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap, 1845 uint8_t *mac, zfs_uio_t *puio, zfs_uio_t *cuio, uint_t *enc_len, 1846 uint8_t **authbuf, uint_t *auth_len, boolean_t *no_crypt) 1847 { 1848 int ret; 1849 iovec_t *mac_iov; 1850 1851 ASSERT(DMU_OT_IS_ENCRYPTED(ot) || ot == DMU_OT_NONE); 1852 1853 /* route to handler */ 1854 switch (ot) { 1855 case DMU_OT_INTENT_LOG: 1856 ret = zio_crypt_init_uios_zil(encrypt, plainbuf, cipherbuf, 1857 datalen, byteswap, puio, cuio, enc_len, authbuf, auth_len, 1858 no_crypt); 1859 break; 1860 case DMU_OT_DNODE: 1861 ret = zio_crypt_init_uios_dnode(encrypt, version, plainbuf, 1862 cipherbuf, datalen, byteswap, puio, cuio, enc_len, authbuf, 1863 auth_len, no_crypt); 1864 break; 1865 default: 1866 ret = zio_crypt_init_uios_normal(encrypt, plainbuf, cipherbuf, 1867 datalen, puio, cuio, enc_len); 1868 *authbuf = NULL; 1869 *auth_len = 0; 1870 *no_crypt = B_FALSE; 1871 break; 1872 } 1873 1874 if (ret != 0) 1875 goto error; 1876 1877 /* populate the uios */ 1878 puio->uio_segflg = UIO_SYSSPACE; 1879 cuio->uio_segflg = UIO_SYSSPACE; 1880 1881 mac_iov = ((iovec_t *)&cuio->uio_iov[cuio->uio_iovcnt - 1]); 1882 mac_iov->iov_base = mac; 1883 mac_iov->iov_len = ZIO_DATA_MAC_LEN; 1884 1885 return (0); 1886 1887 error: 1888 return (ret); 1889 } 1890 1891 /* 1892 * Primary encryption / decryption entrypoint for zio data. 1893 */ 1894 int 1895 zio_do_crypt_data(boolean_t encrypt, zio_crypt_key_t *key, 1896 dmu_object_type_t ot, boolean_t byteswap, uint8_t *salt, uint8_t *iv, 1897 uint8_t *mac, uint_t datalen, uint8_t *plainbuf, uint8_t *cipherbuf, 1898 boolean_t *no_crypt) 1899 { 1900 int ret; 1901 boolean_t locked = B_FALSE; 1902 uint64_t crypt = key->zk_crypt; 1903 uint_t keydata_len = zio_crypt_table[crypt].ci_keylen; 1904 uint_t enc_len, auth_len; 1905 zfs_uio_t puio, cuio; 1906 uint8_t enc_keydata[MASTER_KEY_MAX_LEN]; 1907 crypto_key_t tmp_ckey, *ckey = NULL; 1908 crypto_ctx_template_t tmpl; 1909 uint8_t *authbuf = NULL; 1910 1911 memset(&puio, 0, sizeof (puio)); 1912 memset(&cuio, 0, sizeof (cuio)); 1913 1914 /* 1915 * If the needed key is the current one, just use it. Otherwise we 1916 * need to generate a temporary one from the given salt + master key. 1917 * If we are encrypting, we must return a copy of the current salt 1918 * so that it can be stored in the blkptr_t. 1919 */ 1920 rw_enter(&key->zk_salt_lock, RW_READER); 1921 locked = B_TRUE; 1922 1923 if (memcmp(salt, key->zk_salt, ZIO_DATA_SALT_LEN) == 0) { 1924 ckey = &key->zk_current_key; 1925 tmpl = key->zk_current_tmpl; 1926 } else { 1927 rw_exit(&key->zk_salt_lock); 1928 locked = B_FALSE; 1929 1930 ret = hkdf_sha512(key->zk_master_keydata, keydata_len, NULL, 0, 1931 salt, ZIO_DATA_SALT_LEN, enc_keydata, keydata_len); 1932 if (ret != 0) 1933 goto error; 1934 1935 tmp_ckey.ck_data = enc_keydata; 1936 tmp_ckey.ck_length = CRYPTO_BYTES2BITS(keydata_len); 1937 1938 ckey = &tmp_ckey; 1939 tmpl = NULL; 1940 } 1941 1942 /* 1943 * Attempt to use QAT acceleration if we can. We currently don't 1944 * do this for metadnode and ZIL blocks, since they have a much 1945 * more involved buffer layout and the qat_crypt() function only 1946 * works in-place. 1947 */ 1948 if (qat_crypt_use_accel(datalen) && 1949 ot != DMU_OT_INTENT_LOG && ot != DMU_OT_DNODE) { 1950 uint8_t *srcbuf, *dstbuf; 1951 1952 if (encrypt) { 1953 srcbuf = plainbuf; 1954 dstbuf = cipherbuf; 1955 } else { 1956 srcbuf = cipherbuf; 1957 dstbuf = plainbuf; 1958 } 1959 1960 ret = qat_crypt((encrypt) ? QAT_ENCRYPT : QAT_DECRYPT, srcbuf, 1961 dstbuf, NULL, 0, iv, mac, ckey, key->zk_crypt, datalen); 1962 if (ret == CPA_STATUS_SUCCESS) { 1963 if (locked) { 1964 rw_exit(&key->zk_salt_lock); 1965 locked = B_FALSE; 1966 } 1967 1968 return (0); 1969 } 1970 /* If the hardware implementation fails fall back to software */ 1971 } 1972 1973 /* create uios for encryption */ 1974 ret = zio_crypt_init_uios(encrypt, key->zk_version, ot, plainbuf, 1975 cipherbuf, datalen, byteswap, mac, &puio, &cuio, &enc_len, 1976 &authbuf, &auth_len, no_crypt); 1977 if (ret != 0) 1978 goto error; 1979 1980 /* perform the encryption / decryption in software */ 1981 ret = zio_do_crypt_uio(encrypt, key->zk_crypt, ckey, tmpl, iv, enc_len, 1982 &puio, &cuio, authbuf, auth_len); 1983 if (ret != 0) 1984 goto error; 1985 1986 if (locked) { 1987 rw_exit(&key->zk_salt_lock); 1988 } 1989 1990 if (authbuf != NULL) 1991 zio_buf_free(authbuf, datalen); 1992 if (ckey == &tmp_ckey) 1993 memset(enc_keydata, 0, keydata_len); 1994 zio_crypt_destroy_uio(&puio); 1995 zio_crypt_destroy_uio(&cuio); 1996 1997 return (0); 1998 1999 error: 2000 if (locked) 2001 rw_exit(&key->zk_salt_lock); 2002 if (authbuf != NULL) 2003 zio_buf_free(authbuf, datalen); 2004 if (ckey == &tmp_ckey) 2005 memset(enc_keydata, 0, keydata_len); 2006 zio_crypt_destroy_uio(&puio); 2007 zio_crypt_destroy_uio(&cuio); 2008 2009 return (ret); 2010 } 2011 2012 /* 2013 * Simple wrapper around zio_do_crypt_data() to work with abd's instead of 2014 * linear buffers. 2015 */ 2016 int 2017 zio_do_crypt_abd(boolean_t encrypt, zio_crypt_key_t *key, dmu_object_type_t ot, 2018 boolean_t byteswap, uint8_t *salt, uint8_t *iv, uint8_t *mac, 2019 uint_t datalen, abd_t *pabd, abd_t *cabd, boolean_t *no_crypt) 2020 { 2021 int ret; 2022 void *ptmp, *ctmp; 2023 2024 if (encrypt) { 2025 ptmp = abd_borrow_buf_copy(pabd, datalen); 2026 ctmp = abd_borrow_buf(cabd, datalen); 2027 } else { 2028 ptmp = abd_borrow_buf(pabd, datalen); 2029 ctmp = abd_borrow_buf_copy(cabd, datalen); 2030 } 2031 2032 ret = zio_do_crypt_data(encrypt, key, ot, byteswap, salt, iv, mac, 2033 datalen, ptmp, ctmp, no_crypt); 2034 if (ret != 0) 2035 goto error; 2036 2037 if (encrypt) { 2038 abd_return_buf(pabd, ptmp, datalen); 2039 abd_return_buf_copy(cabd, ctmp, datalen); 2040 } else { 2041 abd_return_buf_copy(pabd, ptmp, datalen); 2042 abd_return_buf(cabd, ctmp, datalen); 2043 } 2044 2045 return (0); 2046 2047 error: 2048 if (encrypt) { 2049 abd_return_buf(pabd, ptmp, datalen); 2050 abd_return_buf_copy(cabd, ctmp, datalen); 2051 } else { 2052 abd_return_buf_copy(pabd, ptmp, datalen); 2053 abd_return_buf(cabd, ctmp, datalen); 2054 } 2055 2056 return (ret); 2057 } 2058 2059 #if defined(_KERNEL) 2060 /* CSTYLED */ 2061 module_param(zfs_key_max_salt_uses, ulong, 0644); 2062 MODULE_PARM_DESC(zfs_key_max_salt_uses, "Max number of times a salt value " 2063 "can be used for generating encryption keys before it is rotated"); 2064 #endif 2065