/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2021 Netflix Inc.
 * Written by: John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/endian.h>
#include <sys/event.h>
#include <sys/ktls.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <crypto/cryptodev.h>
#include <assert.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <atf-c.h>

#include <openssl/err.h>
#include <openssl/evp.h>
#include <openssl/hmac.h>

static void
require_ktls(void)
{
	size_t len;
	bool enable;

	len = sizeof(enable);
	if (sysctlbyname("kern.ipc.tls.enable", &enable, &len, NULL, 0) == -1) {
		if (errno == ENOENT)
			atf_tc_skip("kernel does not support TLS offload");
		atf_libc_error(errno, "Failed to read kern.ipc.tls.enable");
	}

	if (!enable)
		atf_tc_skip("Kernel TLS is disabled");
}

#define	ATF_REQUIRE_KTLS()	require_ktls()

static char
rdigit(void)
{
	/* ASCII printable values between 0x20 and 0x7e */
	return (0x20 + random() % (0x7f - 0x20));
}

static char *
alloc_buffer(size_t len)
{
	char *buf;
	size_t i;

	if (len == 0)
		return (NULL);
	buf = malloc(len);
	for (i = 0; i < len; i++)
		buf[i] = rdigit();
	return (buf);
}

static bool
socketpair_tcp(int *sv)
{
	struct pollfd pfd;
	struct sockaddr_in sin;
	socklen_t len;
	int as, cs, ls;

	ls = socket(PF_INET, SOCK_STREAM, 0);
	if (ls == -1) {
		warn("socket() for listen");
		return (false);
	}

	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	if (bind(ls, (struct sockaddr *)&sin, sizeof(sin)) == -1) {
		warn("bind");
		close(ls);
		return (false);
	}

	if (listen(ls, 1) == -1) {
		warn("listen");
		close(ls);
		return (false);
	}

	len = sizeof(sin);
	if (getsockname(ls, (struct sockaddr *)&sin, &len) == -1) {
		warn("getsockname");
		close(ls);
		return (false);
	}

	cs = socket(PF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0);
	if (cs == -1) {
		warn("socket() for connect");
		close(ls);
		return (false);
	}

	if (connect(cs, (struct sockaddr *)&sin, sizeof(sin)) == -1) {
		if (errno != EINPROGRESS) {
			warn("connect");
			close(ls);
			close(cs);
			return (false);
		}
	}

	as = accept4(ls, NULL, NULL, SOCK_NONBLOCK);
	if (as == -1) {
		warn("accept4");
		close(ls);
		close(cs);
		return (false);
	}

	close(ls);

	pfd.fd = cs;
	pfd.events = POLLOUT;
	pfd.revents = 0;
	ATF_REQUIRE(poll(&pfd, 1, INFTIM) == 1);
	ATF_REQUIRE(pfd.revents == POLLOUT);

	sv[0] = cs;
	sv[1] = as;
	return (true);
}

static void
fd_set_blocking(int fd)
{
	int flags;

	ATF_REQUIRE((flags = fcntl(fd, F_GETFL)) != -1);
	flags &= ~O_NONBLOCK;
	ATF_REQUIRE(fcntl(fd, F_SETFL, flags) != -1);
}

static bool
cbc_decrypt(const EVP_CIPHER *cipher, const char *key, const char *iv,
    const char *input, char *output, size_t size)
{
	EVP_CIPHER_CTX *ctx;
	int outl, total;

	ctx = EVP_CIPHER_CTX_new();
	if (ctx == NULL) {
		warnx("EVP_CIPHER_CTX_new failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		return (false);
	}
	if (EVP_CipherInit_ex(ctx, cipher, NULL, (const u_char *)key,
	    (const u_char *)iv, 0) != 1) {
		warnx("EVP_CipherInit_ex failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	EVP_CIPHER_CTX_set_padding(ctx, 0);
	if (EVP_CipherUpdate(ctx, (u_char *)output, &outl,
	    (const u_char *)input, size) != 1) {
		warnx("EVP_CipherUpdate failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	total = outl;
	if (EVP_CipherFinal_ex(ctx, (u_char *)output + outl, &outl) != 1) {
		warnx("EVP_CipherFinal_ex failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	total += outl;
	if ((size_t)total != size) {
		warnx("decrypt size mismatch: %zu vs %d", size, total);
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	EVP_CIPHER_CTX_free(ctx);
	return (true);
}

static bool
verify_hash(const EVP_MD *md, const void *key, size_t key_len, const void *aad,
    size_t aad_len, const void *buffer, size_t len, const void *digest)
{
	HMAC_CTX *ctx;
	unsigned char digest2[EVP_MAX_MD_SIZE];
	u_int digest_len;

	ctx = HMAC_CTX_new();
	if (ctx == NULL) {
		warnx("HMAC_CTX_new failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		return (false);
	}
	if (HMAC_Init_ex(ctx, key, key_len, md, NULL) != 1) {
		warnx("HMAC_Init_ex failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		HMAC_CTX_free(ctx);
		return (false);
	}
	if (HMAC_Update(ctx, aad, aad_len) != 1) {
		warnx("HMAC_Update (aad) failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		HMAC_CTX_free(ctx);
		return (false);
	}
	if (HMAC_Update(ctx, buffer, len) != 1) {
		warnx("HMAC_Update (payload) failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		HMAC_CTX_free(ctx);
		return (false);
	}
	if (HMAC_Final(ctx, digest2, &digest_len) != 1) {
		warnx("HMAC_Final failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		HMAC_CTX_free(ctx);
		return (false);
	}
	HMAC_CTX_free(ctx);
	if (memcmp(digest, digest2, digest_len) != 0) {
		warnx("HMAC mismatch");
		return (false);
	}
	return (true);
}

static bool
aead_encrypt(const EVP_CIPHER *cipher, const char *key, const char *nonce,
    const void *aad, size_t aad_len, const char *input, char *output,
    size_t size, char *tag, size_t tag_len)
{
	EVP_CIPHER_CTX *ctx;
	int outl, total;

	ctx = EVP_CIPHER_CTX_new();
	if (ctx == NULL) {
		warnx("EVP_CIPHER_CTX_new failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		return (false);
	}
	if (EVP_EncryptInit_ex(ctx, cipher, NULL, (const u_char *)key,
	    (const u_char *)nonce) != 1) {
		warnx("EVP_EncryptInit_ex failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	EVP_CIPHER_CTX_set_padding(ctx, 0);
	if (aad != NULL) {
		if (EVP_EncryptUpdate(ctx, NULL, &outl, (const u_char *)aad,
		    aad_len) != 1) {
			warnx("EVP_EncryptUpdate for AAD failed: %s",
			    ERR_error_string(ERR_get_error(), NULL));
			EVP_CIPHER_CTX_free(ctx);
			return (false);
		}
	}
	if (EVP_EncryptUpdate(ctx, (u_char *)output, &outl,
	    (const u_char *)input, size) != 1) {
		warnx("EVP_EncryptUpdate failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	total = outl;
	if (EVP_EncryptFinal_ex(ctx, (u_char *)output + outl, &outl) != 1) {
		warnx("EVP_EncryptFinal_ex failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	total += outl;
	if ((size_t)total != size) {
		warnx("encrypt size mismatch: %zu vs %d", size, total);
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_GET_TAG, tag_len, tag) !=
	    1) {
		warnx("EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_GET_TAG) failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	EVP_CIPHER_CTX_free(ctx);
	return (true);
}

static bool
aead_decrypt(const EVP_CIPHER *cipher, const char *key, const char *nonce,
    const void *aad, size_t aad_len, const char *input, char *output,
    size_t size, const char *tag, size_t tag_len)
{
	EVP_CIPHER_CTX *ctx;
	int outl, total;
	bool valid;

	ctx = EVP_CIPHER_CTX_new();
	if (ctx == NULL) {
		warnx("EVP_CIPHER_CTX_new failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		return (false);
	}
	if (EVP_DecryptInit_ex(ctx, cipher, NULL, (const u_char *)key,
	    (const u_char *)nonce) != 1) {
		warnx("EVP_DecryptInit_ex failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	EVP_CIPHER_CTX_set_padding(ctx, 0);
	if (aad != NULL) {
		if (EVP_DecryptUpdate(ctx, NULL, &outl, (const u_char *)aad,
		    aad_len) != 1) {
			warnx("EVP_DecryptUpdate for AAD failed: %s",
			    ERR_error_string(ERR_get_error(), NULL));
			EVP_CIPHER_CTX_free(ctx);
			return (false);
		}
	}
	if (EVP_DecryptUpdate(ctx, (u_char *)output, &outl,
	    (const u_char *)input, size) != 1) {
		warnx("EVP_DecryptUpdate failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	total = outl;
	if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, tag_len,
	    __DECONST(char *, tag)) != 1) {
		warnx("EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_SET_TAG) failed: %s",
		    ERR_error_string(ERR_get_error(), NULL));
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	valid = (EVP_DecryptFinal_ex(ctx, (u_char *)output + outl, &outl) == 1);
	total += outl;
	if ((size_t)total != size) {
		warnx("decrypt size mismatch: %zu vs %d", size, total);
		EVP_CIPHER_CTX_free(ctx);
		return (false);
	}
	if (!valid)
		warnx("tag mismatch");
	EVP_CIPHER_CTX_free(ctx);
	return (valid);
}

static void
build_tls_enable(int cipher_alg, size_t cipher_key_len, int auth_alg,
    int minor, uint64_t seqno, struct tls_enable *en)
{
	u_int auth_key_len, iv_len;

	memset(en, 0, sizeof(*en));

	switch (cipher_alg) {
	case CRYPTO_AES_CBC:
		if (minor == TLS_MINOR_VER_ZERO)
			iv_len = AES_BLOCK_LEN;
		else
			iv_len = 0;
		break;
	case CRYPTO_AES_NIST_GCM_16:
		if (minor == TLS_MINOR_VER_TWO)
			iv_len = TLS_AEAD_GCM_LEN;
		else
			iv_len = TLS_1_3_GCM_IV_LEN;
		break;
	case CRYPTO_CHACHA20_POLY1305:
		iv_len = TLS_CHACHA20_IV_LEN;
		break;
	default:
		iv_len = 0;
		break;
	}
	switch (auth_alg) {
	case CRYPTO_SHA1_HMAC:
		auth_key_len = SHA1_HASH_LEN;
		break;
	case CRYPTO_SHA2_256_HMAC:
		auth_key_len = SHA2_256_HASH_LEN;
		break;
	case CRYPTO_SHA2_384_HMAC:
		auth_key_len = SHA2_384_HASH_LEN;
		break;
	default:
		auth_key_len = 0;
		break;
	}
	en->cipher_key = alloc_buffer(cipher_key_len);
	en->iv = alloc_buffer(iv_len);
	en->auth_key = alloc_buffer(auth_key_len);
	en->cipher_algorithm = cipher_alg;
	en->cipher_key_len = cipher_key_len;
	en->iv_len = iv_len;
	en->auth_algorithm = auth_alg;
	en->auth_key_len = auth_key_len;
	en->tls_vmajor = TLS_MAJOR_VER_ONE;
	en->tls_vminor = minor;
	be64enc(en->rec_seq, seqno);
}

static void
free_tls_enable(struct tls_enable *en)
{
	free(__DECONST(void *, en->cipher_key));
	free(__DECONST(void *, en->iv));
	free(__DECONST(void *, en->auth_key));
}
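/*
 * Illustrative sketch only (this is what the generated test bodies below
 * effectively do): a caller pairs build_tls_enable() with free_tls_enable()
 * around a single session, e.g. for TLS 1.2 AES-128-GCM:
 *
 *	struct tls_enable en;
 *	uint64_t seqno = random();
 *
 *	build_tls_enable(CRYPTO_AES_NIST_GCM_16, 128 / 8, 0,
 *	    TLS_MINOR_VER_TWO, seqno, &en);
 *	(void)setsockopt(s, IPPROTO_TCP, TCP_TXTLS_ENABLE, &en, sizeof(en));
 *	free_tls_enable(&en);
 */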
static const EVP_CIPHER *
tls_EVP_CIPHER(const struct tls_enable *en)
{
	switch (en->cipher_algorithm) {
	case CRYPTO_AES_CBC:
		switch (en->cipher_key_len) {
		case 128 / 8:
			return (EVP_aes_128_cbc());
		case 256 / 8:
			return (EVP_aes_256_cbc());
		default:
			return (NULL);
		}
		break;
	case CRYPTO_AES_NIST_GCM_16:
		switch (en->cipher_key_len) {
		case 128 / 8:
			return (EVP_aes_128_gcm());
		case 256 / 8:
			return (EVP_aes_256_gcm());
		default:
			return (NULL);
		}
		break;
	case CRYPTO_CHACHA20_POLY1305:
		return (EVP_chacha20_poly1305());
	default:
		return (NULL);
	}
}

static const EVP_MD *
tls_EVP_MD(const struct tls_enable *en)
{
	switch (en->auth_algorithm) {
	case CRYPTO_SHA1_HMAC:
		return (EVP_sha1());
	case CRYPTO_SHA2_256_HMAC:
		return (EVP_sha256());
	case CRYPTO_SHA2_384_HMAC:
		return (EVP_sha384());
	default:
		return (NULL);
	}
}

static size_t
tls_header_len(struct tls_enable *en)
{
	size_t len;

	len = sizeof(struct tls_record_layer);
	switch (en->cipher_algorithm) {
	case CRYPTO_AES_CBC:
		if (en->tls_vminor != TLS_MINOR_VER_ZERO)
			len += AES_BLOCK_LEN;
		return (len);
	case CRYPTO_AES_NIST_GCM_16:
		if (en->tls_vminor == TLS_MINOR_VER_TWO)
			len += sizeof(uint64_t);
		return (len);
	case CRYPTO_CHACHA20_POLY1305:
		return (len);
	default:
		return (0);
	}
}

static size_t
tls_mac_len(struct tls_enable *en)
{
	switch (en->cipher_algorithm) {
	case CRYPTO_AES_CBC:
		switch (en->auth_algorithm) {
		case CRYPTO_SHA1_HMAC:
			return (SHA1_HASH_LEN);
		case CRYPTO_SHA2_256_HMAC:
			return (SHA2_256_HASH_LEN);
		case CRYPTO_SHA2_384_HMAC:
			return (SHA2_384_HASH_LEN);
		default:
			return (0);
		}
	case CRYPTO_AES_NIST_GCM_16:
		return (AES_GMAC_HASH_LEN);
	case CRYPTO_CHACHA20_POLY1305:
		return (POLY1305_HASH_LEN);
	default:
		return (0);
	}
}

/* Includes maximum padding for MTE. */
static size_t
tls_trailer_len(struct tls_enable *en)
{
	size_t len;

	len = tls_mac_len(en);
	if (en->cipher_algorithm == CRYPTO_AES_CBC)
		len += AES_BLOCK_LEN;
	if (en->tls_vminor == TLS_MINOR_VER_THREE)
		len++;
	return (len);
}

/* 'len' is the length of the payload application data. */
static void
tls_mte_aad(struct tls_enable *en, size_t len,
    const struct tls_record_layer *hdr, uint64_t seqno, struct tls_mac_data *ad)
{
	ad->seq = htobe64(seqno);
	ad->type = hdr->tls_type;
	ad->tls_vmajor = hdr->tls_vmajor;
	ad->tls_vminor = hdr->tls_vminor;
	ad->tls_length = htons(len);
}

static void
tls_12_aead_aad(struct tls_enable *en, size_t len,
    const struct tls_record_layer *hdr, uint64_t seqno,
    struct tls_aead_data *ad)
{
	ad->seq = htobe64(seqno);
	ad->type = hdr->tls_type;
	ad->tls_vmajor = hdr->tls_vmajor;
	ad->tls_vminor = hdr->tls_vminor;
	ad->tls_length = htons(len);
}

static void
tls_13_aad(struct tls_enable *en, const struct tls_record_layer *hdr,
    uint64_t seqno, struct tls_aead_data_13 *ad)
{
	ad->type = hdr->tls_type;
	ad->tls_vmajor = hdr->tls_vmajor;
	ad->tls_vminor = hdr->tls_vminor;
	ad->tls_length = hdr->tls_length;
}

static void
tls_12_gcm_nonce(struct tls_enable *en, const struct tls_record_layer *hdr,
    char *nonce)
{
	memcpy(nonce, en->iv, TLS_AEAD_GCM_LEN);
	memcpy(nonce + TLS_AEAD_GCM_LEN, hdr + 1, sizeof(uint64_t));
}

static void
tls_13_nonce(struct tls_enable *en, uint64_t seqno, char *nonce)
{
	static_assert(TLS_1_3_GCM_IV_LEN == TLS_CHACHA20_IV_LEN,
	    "TLS 1.3 nonce length mismatch");
	memcpy(nonce, en->iv, TLS_1_3_GCM_IV_LEN);
	*(uint64_t *)(nonce + 4) ^= htobe64(seqno);
}
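/*
 * Informal sketch of the 12-byte nonce layouts built above (not normative;
 * see RFC 5288 and RFC 8446 for the actual definitions):
 *
 *   TLS 1.2 AES-GCM: the 4-byte implicit salt from the session IV followed
 *   by the 8-byte explicit nonce copied from the record header.
 *
 *   TLS 1.3 (and TLS 1.2 ChaCha20-Poly1305): the 12-byte static IV with the
 *   big-endian 64-bit record sequence number XORed into its last 8 bytes,
 *   so for seqno 5 only the final IV byte changes.
 */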
/*
 * Decrypt a TLS record 'len' bytes long at 'src' and store the result at
 * 'dst'.  If the TLS record header length doesn't match or 'dst' doesn't
 * have sufficient room ('avail'), fail the test.
 */
static size_t
decrypt_tls_aes_cbc_mte(struct tls_enable *en, uint64_t seqno, const void *src,
    size_t len, void *dst, size_t avail, uint8_t *record_type)
{
	const struct tls_record_layer *hdr;
	struct tls_mac_data aad;
	const char *iv;
	char *buf;
	size_t hdr_len, mac_len, payload_len;
	int padding;

	hdr = src;
	hdr_len = tls_header_len(en);
	mac_len = tls_mac_len(en);
	ATF_REQUIRE(hdr->tls_vmajor == TLS_MAJOR_VER_ONE);
	ATF_REQUIRE(hdr->tls_vminor == en->tls_vminor);

	/* First, decrypt the outer payload into a temporary buffer. */
	payload_len = len - hdr_len;
	buf = malloc(payload_len);
	if (en->tls_vminor == TLS_MINOR_VER_ZERO)
		iv = en->iv;
	else
		iv = (void *)(hdr + 1);
	ATF_REQUIRE(cbc_decrypt(tls_EVP_CIPHER(en), en->cipher_key, iv,
	    (const u_char *)src + hdr_len, buf, payload_len));

	/*
	 * Copy the last encrypted block to use as the IV for the next
	 * record for TLS 1.0.
	 */
	if (en->tls_vminor == TLS_MINOR_VER_ZERO)
		memcpy(__DECONST(uint8_t *, en->iv), (const u_char *)src +
		    (len - AES_BLOCK_LEN), AES_BLOCK_LEN);

	/*
	 * Verify trailing padding and strip.
	 *
	 * The kernel always generates the smallest amount of padding.
	 */
	padding = buf[payload_len - 1] + 1;
	ATF_REQUIRE(padding > 0 && padding <= AES_BLOCK_LEN);
	ATF_REQUIRE(payload_len >= mac_len + padding);
	payload_len -= padding;

	/* Verify HMAC. */
	payload_len -= mac_len;
	tls_mte_aad(en, payload_len, hdr, seqno, &aad);
	ATF_REQUIRE(verify_hash(tls_EVP_MD(en), en->auth_key, en->auth_key_len,
	    &aad, sizeof(aad), buf, payload_len, buf + payload_len));

	ATF_REQUIRE(payload_len <= avail);
	memcpy(dst, buf, payload_len);
	free(buf);
	*record_type = hdr->tls_type;
	return (payload_len);
}

static size_t
decrypt_tls_12_aead(struct tls_enable *en, uint64_t seqno, const void *src,
    size_t len, void *dst, uint8_t *record_type)
{
	const struct tls_record_layer *hdr;
	struct tls_aead_data aad;
	char nonce[12];
	size_t hdr_len, mac_len, payload_len;

	hdr = src;

	hdr_len = tls_header_len(en);
	mac_len = tls_mac_len(en);
	payload_len = len - (hdr_len + mac_len);
	ATF_REQUIRE(hdr->tls_vmajor == TLS_MAJOR_VER_ONE);
	ATF_REQUIRE(hdr->tls_vminor == TLS_MINOR_VER_TWO);

	tls_12_aead_aad(en, payload_len, hdr, seqno, &aad);
	if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		tls_12_gcm_nonce(en, hdr, nonce);
	else
		tls_13_nonce(en, seqno, nonce);

	ATF_REQUIRE(aead_decrypt(tls_EVP_CIPHER(en), en->cipher_key, nonce,
	    &aad, sizeof(aad), (const char *)src + hdr_len, dst, payload_len,
	    (const char *)src + hdr_len + payload_len, mac_len));

	*record_type = hdr->tls_type;
	return (payload_len);
}

static size_t
decrypt_tls_13_aead(struct tls_enable *en, uint64_t seqno, const void *src,
    size_t len, void *dst, uint8_t *record_type)
{
	const struct tls_record_layer *hdr;
	struct tls_aead_data_13 aad;
	char nonce[12];
	char *buf;
	size_t hdr_len, mac_len, payload_len;

	hdr = src;

	hdr_len = tls_header_len(en);
	mac_len = tls_mac_len(en);
	payload_len = len - (hdr_len + mac_len);
	ATF_REQUIRE(payload_len >= 1);
	ATF_REQUIRE(hdr->tls_type == TLS_RLTYPE_APP);
	ATF_REQUIRE(hdr->tls_vmajor == TLS_MAJOR_VER_ONE);
	ATF_REQUIRE(hdr->tls_vminor == TLS_MINOR_VER_TWO);

	tls_13_aad(en, hdr, seqno, &aad);
	tls_13_nonce(en, seqno, nonce);

	/*
	 * Have to use a temporary buffer for the output due to the
	 * record type as the last byte of the trailer.
	 */
	buf = malloc(payload_len);

	ATF_REQUIRE(aead_decrypt(tls_EVP_CIPHER(en), en->cipher_key, nonce,
	    &aad, sizeof(aad), (const char *)src + hdr_len, buf, payload_len,
	    (const char *)src + hdr_len + payload_len, mac_len));

	/* Trim record type. */
	*record_type = buf[payload_len - 1];
	payload_len--;

	memcpy(dst, buf, payload_len);
	free(buf);

	return (payload_len);
}

static size_t
decrypt_tls_aead(struct tls_enable *en, uint64_t seqno, const void *src,
    size_t len, void *dst, size_t avail, uint8_t *record_type)
{
	const struct tls_record_layer *hdr;
	size_t payload_len;

	hdr = src;
	ATF_REQUIRE(ntohs(hdr->tls_length) + sizeof(*hdr) == len);

	payload_len = len - (tls_header_len(en) + tls_trailer_len(en));
	ATF_REQUIRE(payload_len <= avail);

	if (en->tls_vminor == TLS_MINOR_VER_TWO) {
		ATF_REQUIRE(decrypt_tls_12_aead(en, seqno, src, len, dst,
		    record_type) == payload_len);
	} else {
		ATF_REQUIRE(decrypt_tls_13_aead(en, seqno, src, len, dst,
		    record_type) == payload_len);
	}

	return (payload_len);
}

static size_t
decrypt_tls_record(struct tls_enable *en, uint64_t seqno, const void *src,
    size_t len, void *dst, size_t avail, uint8_t *record_type)
{
	if (en->cipher_algorithm == CRYPTO_AES_CBC)
		return (decrypt_tls_aes_cbc_mte(en, seqno, src, len, dst, avail,
		    record_type));
	else
		return (decrypt_tls_aead(en, seqno, src, len, dst, avail,
		    record_type));
}
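/*
 * Rough on-the-wire layouts assumed by the decrypt helpers above and the
 * encrypt helpers below (informal sketch, not a wire-format reference):
 *
 *   TLS 1.2 AES-GCM: header | 8-byte explicit nonce | ciphertext | 16-byte tag
 *   TLS 1.3 AEAD:    header | ciphertext of (plaintext | type | padding) | tag
 *
 * tls_header_len() and tls_trailer_len() account for these per-suite
 * differences so the encrypt and decrypt paths can share the size math.
 */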
/*
 * Encrypt a TLS record of type 'record_type' with payload 'len' bytes
 * long at 'src' and store the result at 'dst'.  If 'dst' doesn't have
 * sufficient room ('avail'), fail the test.
 */
static size_t
encrypt_tls_12_aead(struct tls_enable *en, uint8_t record_type, uint64_t seqno,
    const void *src, size_t len, void *dst)
{
	struct tls_record_layer *hdr;
	struct tls_aead_data aad;
	char nonce[12];
	size_t hdr_len, mac_len, record_len;

	hdr = dst;

	hdr_len = tls_header_len(en);
	mac_len = tls_mac_len(en);
	record_len = hdr_len + len + mac_len;

	hdr->tls_type = record_type;
	hdr->tls_vmajor = TLS_MAJOR_VER_ONE;
	hdr->tls_vminor = TLS_MINOR_VER_TWO;
	hdr->tls_length = htons(record_len - sizeof(*hdr));
	if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		memcpy(hdr + 1, &seqno, sizeof(seqno));

	tls_12_aead_aad(en, len, hdr, seqno, &aad);
	if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		tls_12_gcm_nonce(en, hdr, nonce);
	else
		tls_13_nonce(en, seqno, nonce);

	ATF_REQUIRE(aead_encrypt(tls_EVP_CIPHER(en), en->cipher_key, nonce,
	    &aad, sizeof(aad), src, (char *)dst + hdr_len, len,
	    (char *)dst + hdr_len + len, mac_len));

	return (record_len);
}

static size_t
encrypt_tls_13_aead(struct tls_enable *en, uint8_t record_type, uint64_t seqno,
    const void *src, size_t len, void *dst, size_t padding)
{
	struct tls_record_layer *hdr;
	struct tls_aead_data_13 aad;
	char nonce[12];
	char *buf;
	size_t hdr_len, mac_len, record_len;

	hdr = dst;

	hdr_len = tls_header_len(en);
	mac_len = tls_mac_len(en);
	record_len = hdr_len + len + 1 + padding + mac_len;

	hdr->tls_type = TLS_RLTYPE_APP;
	hdr->tls_vmajor = TLS_MAJOR_VER_ONE;
	hdr->tls_vminor = TLS_MINOR_VER_TWO;
	hdr->tls_length = htons(record_len - sizeof(*hdr));

	tls_13_aad(en, hdr, seqno, &aad);
	tls_13_nonce(en, seqno, nonce);

	/*
	 * Have to use a temporary buffer for the input so that the record
	 * type can be appended.
	 */
	buf = malloc(len + 1 + padding);
	memcpy(buf, src, len);
	buf[len] = record_type;
	memset(buf + len + 1, 0, padding);

	ATF_REQUIRE(aead_encrypt(tls_EVP_CIPHER(en), en->cipher_key, nonce,
	    &aad, sizeof(aad), buf, (char *)dst + hdr_len, len + 1 + padding,
	    (char *)dst + hdr_len + len + 1 + padding, mac_len));

	free(buf);

	return (record_len);
}

static size_t
encrypt_tls_aead(struct tls_enable *en, uint8_t record_type, uint64_t seqno,
    const void *src, size_t len, void *dst, size_t avail, size_t padding)
{
	size_t record_len;

	record_len = tls_header_len(en) + len + padding + tls_trailer_len(en);
	ATF_REQUIRE(record_len <= avail);

	if (en->tls_vminor == TLS_MINOR_VER_TWO) {
		ATF_REQUIRE(padding == 0);
		ATF_REQUIRE(encrypt_tls_12_aead(en, record_type, seqno, src,
		    len, dst) == record_len);
	} else
		ATF_REQUIRE(encrypt_tls_13_aead(en, record_type, seqno, src,
		    len, dst, padding) == record_len);

	return (record_len);
}

static size_t
encrypt_tls_record(struct tls_enable *en, uint8_t record_type, uint64_t seqno,
    const void *src, size_t len, void *dst, size_t avail, size_t padding)
{
	return (encrypt_tls_aead(en, record_type, seqno, src, len, dst, avail,
	    padding));
}

static void
test_ktls_transmit_app_data(struct tls_enable *en, uint64_t seqno, size_t len)
{
	struct kevent ev;
	struct tls_record_layer *hdr;
	char *plaintext, *decrypted, *outbuf;
	size_t decrypted_len, outbuf_len, outbuf_cap, record_len, written;
	ssize_t rv;
	int kq, sockets[2];
	uint8_t record_type;

	plaintext = alloc_buffer(len);
	decrypted = malloc(len);
	outbuf_cap = tls_header_len(en) + TLS_MAX_MSG_SIZE_V10_2 +
	    tls_trailer_len(en);
	outbuf = malloc(outbuf_cap);
	hdr = (struct tls_record_layer *)outbuf;

	ATF_REQUIRE((kq = kqueue()) != -1);

	ATF_REQUIRE_MSG(socketpair_tcp(sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_TXTLS_ENABLE, en,
	    sizeof(*en)) == 0);

	EV_SET(&ev, sockets[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
	ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, NULL) == 0);
	EV_SET(&ev, sockets[1], EVFILT_WRITE, EV_ADD, 0, 0, NULL);
	ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, NULL) == 0);

	decrypted_len = 0;
	outbuf_len = 0;
	written = 0;

	while (decrypted_len != len) {
		ATF_REQUIRE(kevent(kq, NULL, 0, &ev, 1, NULL) == 1);

		switch (ev.filter) {
		case EVFILT_WRITE:
			/* Try to write any remaining data. */
			rv = write(ev.ident, plaintext + written,
			    len - written);
			ATF_REQUIRE_MSG(rv > 0,
			    "failed to write to socket");
			written += rv;
			if (written == len) {
				ev.flags = EV_DISABLE;
				ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0,
				    NULL) == 0);
			}
			break;

		case EVFILT_READ:
			ATF_REQUIRE((ev.flags & EV_EOF) == 0);

			/*
			 * Try to read data for the next TLS record
			 * into outbuf.  Start by reading the header
			 * to determine how much additional data to
			 * read.
			 */
			if (outbuf_len < sizeof(struct tls_record_layer)) {
				rv = read(ev.ident, outbuf + outbuf_len,
				    sizeof(struct tls_record_layer) -
				    outbuf_len);
				ATF_REQUIRE_MSG(rv > 0,
				    "failed to read from socket");
				outbuf_len += rv;
			}

			if (outbuf_len < sizeof(struct tls_record_layer))
				break;

			record_len = sizeof(struct tls_record_layer) +
			    ntohs(hdr->tls_length);
			ATF_REQUIRE(record_len <= outbuf_cap);
			ATF_REQUIRE(record_len > outbuf_len);
			rv = read(ev.ident, outbuf + outbuf_len,
			    record_len - outbuf_len);
			if (rv == -1 && errno == EAGAIN)
				break;
			ATF_REQUIRE_MSG(rv > 0, "failed to read from socket");

			outbuf_len += rv;
			if (outbuf_len == record_len) {
				decrypted_len += decrypt_tls_record(en, seqno,
				    outbuf, outbuf_len,
				    decrypted + decrypted_len,
				    len - decrypted_len, &record_type);
				ATF_REQUIRE(record_type == TLS_RLTYPE_APP);

				seqno++;
				outbuf_len = 0;
			}
			break;
		}
	}

	ATF_REQUIRE_MSG(written == decrypted_len,
	    "read %zu decrypted bytes, but wrote %zu", decrypted_len, written);

	ATF_REQUIRE(memcmp(plaintext, decrypted, len) == 0);

	free(outbuf);
	free(decrypted);
	free(plaintext);

	ATF_REQUIRE(close(sockets[1]) == 0);
	ATF_REQUIRE(close(sockets[0]) == 0);
	ATF_REQUIRE(close(kq) == 0);
}
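/*
 * Non-application records (e.g. alerts) are sent on a transmit KTLS socket
 * with sendmsg() and a TLS_SET_RECORD_TYPE control message carrying the
 * desired record type; the helper below wraps that pattern for the
 * control-record tests.
 */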
static void
ktls_send_control_message(int fd, uint8_t type, void *data, size_t len)
{
	struct msghdr msg;
	struct cmsghdr *cmsg;
	char cbuf[CMSG_SPACE(sizeof(type))];
	struct iovec iov;

	memset(&msg, 0, sizeof(msg));

	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = IPPROTO_TCP;
	cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
	cmsg->cmsg_len = CMSG_LEN(sizeof(type));
	*(uint8_t *)CMSG_DATA(cmsg) = type;

	iov.iov_base = data;
	iov.iov_len = len;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	ATF_REQUIRE(sendmsg(fd, &msg, 0) == (ssize_t)len);
}

static void
test_ktls_transmit_control(struct tls_enable *en, uint64_t seqno, uint8_t type,
    size_t len)
{
	struct tls_record_layer *hdr;
	char *plaintext, *decrypted, *outbuf;
	size_t outbuf_cap, payload_len, record_len;
	ssize_t rv;
	int sockets[2];
	uint8_t record_type;

	ATF_REQUIRE(len <= TLS_MAX_MSG_SIZE_V10_2);

	plaintext = alloc_buffer(len);
	decrypted = malloc(len);
	outbuf_cap = tls_header_len(en) + len + tls_trailer_len(en);
	outbuf = malloc(outbuf_cap);
	hdr = (struct tls_record_layer *)outbuf;

	ATF_REQUIRE_MSG(socketpair_tcp(sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_TXTLS_ENABLE, en,
	    sizeof(*en)) == 0);

	fd_set_blocking(sockets[0]);
	fd_set_blocking(sockets[1]);

	ktls_send_control_message(sockets[1], type, plaintext, len);

	/*
	 * First read the header to determine how much additional data
	 * to read.
	 */
	rv = read(sockets[0], outbuf, sizeof(struct tls_record_layer));
	ATF_REQUIRE(rv == sizeof(struct tls_record_layer));
	payload_len = ntohs(hdr->tls_length);
	record_len = payload_len + sizeof(struct tls_record_layer);
	ATF_REQUIRE(record_len <= outbuf_cap);
	rv = read(sockets[0], outbuf + sizeof(struct tls_record_layer),
	    payload_len);
	ATF_REQUIRE(rv == (ssize_t)payload_len);

	rv = decrypt_tls_record(en, seqno, outbuf, record_len, decrypted, len,
	    &record_type);

	ATF_REQUIRE_MSG((ssize_t)len == rv,
	    "read %zd decrypted bytes, but wrote %zu", rv, len);
	ATF_REQUIRE(record_type == type);

	ATF_REQUIRE(memcmp(plaintext, decrypted, len) == 0);

	free(outbuf);
	free(decrypted);
	free(plaintext);

	ATF_REQUIRE(close(sockets[1]) == 0);
	ATF_REQUIRE(close(sockets[0]) == 0);
}

static void
test_ktls_transmit_empty_fragment(struct tls_enable *en, uint64_t seqno)
{
	struct tls_record_layer *hdr;
	char *outbuf;
	size_t outbuf_cap, payload_len, record_len;
	ssize_t rv;
	int sockets[2];
	uint8_t record_type;

	outbuf_cap = tls_header_len(en) + tls_trailer_len(en);
	outbuf = malloc(outbuf_cap);
	hdr = (struct tls_record_layer *)outbuf;

	ATF_REQUIRE_MSG(socketpair_tcp(sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_TXTLS_ENABLE, en,
	    sizeof(*en)) == 0);

	fd_set_blocking(sockets[0]);
	fd_set_blocking(sockets[1]);

	/* A write of zero bytes should send an empty fragment. */
	rv = write(sockets[1], NULL, 0);
	ATF_REQUIRE(rv == 0);

	/*
	 * First read the header to determine how much additional data
	 * to read.
	 */
	rv = read(sockets[0], outbuf, sizeof(struct tls_record_layer));
	ATF_REQUIRE(rv == sizeof(struct tls_record_layer));
	payload_len = ntohs(hdr->tls_length);
	record_len = payload_len + sizeof(struct tls_record_layer);
	ATF_REQUIRE(record_len <= outbuf_cap);
	rv = read(sockets[0], outbuf + sizeof(struct tls_record_layer),
	    payload_len);
	ATF_REQUIRE(rv == (ssize_t)payload_len);

	rv = decrypt_tls_record(en, seqno, outbuf, record_len, NULL, 0,
	    &record_type);

	ATF_REQUIRE_MSG(rv == 0,
	    "read %zd decrypted bytes for an empty fragment", rv);
	ATF_REQUIRE(record_type == TLS_RLTYPE_APP);

	free(outbuf);

	ATF_REQUIRE(close(sockets[1]) == 0);
	ATF_REQUIRE(close(sockets[0]) == 0);
}

static size_t
ktls_receive_tls_record(struct tls_enable *en, int fd, uint8_t record_type,
    void *data, size_t len)
{
	struct msghdr msg;
	struct cmsghdr *cmsg;
	struct tls_get_record *tgr;
	char cbuf[CMSG_SPACE(sizeof(*tgr))];
	struct iovec iov;
	ssize_t rv;

	memset(&msg, 0, sizeof(msg));

	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	iov.iov_base = data;
	iov.iov_len = len;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	ATF_REQUIRE((rv = recvmsg(fd, &msg, 0)) > 0);

	ATF_REQUIRE((msg.msg_flags & (MSG_EOR | MSG_CTRUNC)) == MSG_EOR);

	cmsg = CMSG_FIRSTHDR(&msg);
	ATF_REQUIRE(cmsg != NULL);
	ATF_REQUIRE(cmsg->cmsg_level == IPPROTO_TCP);
	ATF_REQUIRE(cmsg->cmsg_type == TLS_GET_RECORD);
	ATF_REQUIRE(cmsg->cmsg_len == CMSG_LEN(sizeof(*tgr)));

	tgr = (struct tls_get_record *)CMSG_DATA(cmsg);
	ATF_REQUIRE(tgr->tls_type == record_type);
	ATF_REQUIRE(tgr->tls_vmajor == en->tls_vmajor);
	/* XXX: Not sure if this is what OpenSSL expects? */
	if (en->tls_vminor == TLS_MINOR_VER_THREE)
		ATF_REQUIRE(tgr->tls_vminor == TLS_MINOR_VER_TWO);
	else
		ATF_REQUIRE(tgr->tls_vminor == en->tls_vminor);
	ATF_REQUIRE(tgr->tls_length == htons(rv));

	return (rv);
}

static void
test_ktls_receive_app_data(struct tls_enable *en, uint64_t seqno, size_t len,
    size_t padding)
{
	struct kevent ev;
	char *plaintext, *received, *outbuf;
	size_t outbuf_cap, outbuf_len, outbuf_sent, received_len, todo, written;
	ssize_t rv;
	int kq, sockets[2];

	plaintext = alloc_buffer(len);
	received = malloc(len);
	outbuf_cap = tls_header_len(en) + TLS_MAX_MSG_SIZE_V10_2 +
	    tls_trailer_len(en);
	outbuf = malloc(outbuf_cap);

	ATF_REQUIRE((kq = kqueue()) != -1);

	ATF_REQUIRE_MSG(socketpair_tcp(sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[0], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
	    sizeof(*en)) == 0);

	EV_SET(&ev, sockets[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
	ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, NULL) == 0);
	EV_SET(&ev, sockets[1], EVFILT_WRITE, EV_ADD, 0, 0, NULL);
	ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, NULL) == 0);

	received_len = 0;
	outbuf_len = 0;
	written = 0;

	while (received_len != len) {
		ATF_REQUIRE(kevent(kq, NULL, 0, &ev, 1, NULL) == 1);

		switch (ev.filter) {
		case EVFILT_WRITE:
			/*
			 * Compose the next TLS record to send.
			 */
			if (outbuf_len == 0) {
				ATF_REQUIRE(written < len);
				todo = len - written;
				if (todo > TLS_MAX_MSG_SIZE_V10_2 - padding)
					todo = TLS_MAX_MSG_SIZE_V10_2 - padding;
				outbuf_len = encrypt_tls_record(en,
				    TLS_RLTYPE_APP, seqno, plaintext + written,
				    todo, outbuf, outbuf_cap, padding);
				outbuf_sent = 0;
				written += todo;
				seqno++;
			}

			/*
			 * Try to write the remainder of the current
			 * TLS record.
			 */
			rv = write(ev.ident, outbuf + outbuf_sent,
			    outbuf_len - outbuf_sent);
			ATF_REQUIRE_MSG(rv > 0,
			    "failed to write to socket");
			outbuf_sent += rv;
			if (outbuf_sent == outbuf_len) {
				outbuf_len = 0;
				if (written == len) {
					ev.flags = EV_DISABLE;
					ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0,
					    NULL) == 0);
				}
			}
			break;

		case EVFILT_READ:
			ATF_REQUIRE((ev.flags & EV_EOF) == 0);

			rv = ktls_receive_tls_record(en, ev.ident,
			    TLS_RLTYPE_APP, received + received_len,
			    len - received_len);
			received_len += rv;
			break;
		}
	}

	ATF_REQUIRE_MSG(written == received_len,
	    "read %zu decrypted bytes, but wrote %zu", received_len, written);

	ATF_REQUIRE(memcmp(plaintext, received, len) == 0);

	free(outbuf);
	free(received);
	free(plaintext);

	ATF_REQUIRE(close(sockets[1]) == 0);
	ATF_REQUIRE(close(sockets[0]) == 0);
	ATF_REQUIRE(close(kq) == 0);
}

#define	TLS_10_TESTS(M)							\
	M(aes128_cbc_1_0_sha1, CRYPTO_AES_CBC, 128 / 8,			\
	    CRYPTO_SHA1_HMAC)						\
	M(aes256_cbc_1_0_sha1, CRYPTO_AES_CBC, 256 / 8,			\
	    CRYPTO_SHA1_HMAC)

#define	TLS_13_TESTS(M)							\
	M(aes128_gcm_1_3, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0,		\
	    TLS_MINOR_VER_THREE)					\
	M(aes256_gcm_1_3, CRYPTO_AES_NIST_GCM_16, 256 / 8, 0,		\
	    TLS_MINOR_VER_THREE)					\
	M(chacha20_poly1305_1_3, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0,	\
	    TLS_MINOR_VER_THREE)

#define	AES_CBC_TESTS(M)						\
	M(aes128_cbc_1_0_sha1, CRYPTO_AES_CBC, 128 / 8,			\
	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_ZERO)			\
	M(aes256_cbc_1_0_sha1, CRYPTO_AES_CBC, 256 / 8,			\
	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_ZERO)			\
	M(aes128_cbc_1_1_sha1, CRYPTO_AES_CBC, 128 / 8,			\
	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_ONE)			\
	M(aes256_cbc_1_1_sha1, CRYPTO_AES_CBC, 256 / 8,			\
	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_ONE)			\
	M(aes128_cbc_1_2_sha1, CRYPTO_AES_CBC, 128 / 8,			\
	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_TWO)			\
	M(aes256_cbc_1_2_sha1, CRYPTO_AES_CBC, 256 / 8,			\
	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_TWO)			\
	M(aes128_cbc_1_2_sha256, CRYPTO_AES_CBC, 128 / 8,		\
	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_TWO)			\
	M(aes256_cbc_1_2_sha256, CRYPTO_AES_CBC, 256 / 8,		\
	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_TWO)			\
	M(aes128_cbc_1_2_sha384, CRYPTO_AES_CBC, 128 / 8,		\
	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_TWO)			\
	M(aes256_cbc_1_2_sha384, CRYPTO_AES_CBC, 256 / 8,		\
	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_TWO)			\

#define	AES_GCM_TESTS(M)						\
	M(aes128_gcm_1_2, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0,		\
	    TLS_MINOR_VER_TWO)						\
	M(aes256_gcm_1_2, CRYPTO_AES_NIST_GCM_16, 256 / 8, 0,		\
	    TLS_MINOR_VER_TWO)						\
	M(aes128_gcm_1_3, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0,		\
	    TLS_MINOR_VER_THREE)					\
	M(aes256_gcm_1_3, CRYPTO_AES_NIST_GCM_16, 256 / 8, 0,		\
	    TLS_MINOR_VER_THREE)

#define	CHACHA20_TESTS(M)						\
	M(chacha20_poly1305_1_2, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0,	\
	    TLS_MINOR_VER_TWO)						\
	M(chacha20_poly1305_1_3, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0,	\
	    TLS_MINOR_VER_THREE)

#define	GEN_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, name, len)					\
ATF_TC_WITHOUT_HEAD(ktls_transmit_##cipher_name##_##name);		\
ATF_TC_BODY(ktls_transmit_##cipher_name##_##name, tc)			\
{									\
	struct tls_enable en;						\
	uint64_t seqno;							\
									\
	ATF_REQUIRE_KTLS();						\
	seqno = random();						\
	build_tls_enable(cipher_alg, key_size, auth_alg, minor, seqno,	\
	    &en);							\
	test_ktls_transmit_app_data(&en, seqno, len);			\
	free_tls_enable(&en);						\
}

#define	ADD_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, name)					\
	ATF_TP_ADD_TC(tp, ktls_transmit_##cipher_name##_##name);

#define	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, name, type, len)				\
ATF_TC_WITHOUT_HEAD(ktls_transmit_##cipher_name##_##name);		\
ATF_TC_BODY(ktls_transmit_##cipher_name##_##name, tc)			\
{									\
	struct tls_enable en;						\
	uint64_t seqno;							\
									\
	ATF_REQUIRE_KTLS();						\
	seqno = random();						\
	build_tls_enable(cipher_alg, key_size, auth_alg, minor, seqno,	\
	    &en);							\
	test_ktls_transmit_control(&en, seqno, type, len);		\
	free_tls_enable(&en);						\
}

#define	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, name)					\
	ATF_TP_ADD_TC(tp, ktls_transmit_##cipher_name##_##name);

#define	GEN_TRANSMIT_EMPTY_FRAGMENT_TEST(cipher_name, cipher_alg,	\
	    key_size, auth_alg)						\
ATF_TC_WITHOUT_HEAD(ktls_transmit_##cipher_name##_empty_fragment);	\
ATF_TC_BODY(ktls_transmit_##cipher_name##_empty_fragment, tc)		\
{									\
	struct tls_enable en;						\
	uint64_t seqno;							\
									\
	ATF_REQUIRE_KTLS();						\
	seqno = random();						\
	build_tls_enable(cipher_alg, key_size, auth_alg,		\
	    TLS_MINOR_VER_ZERO, seqno, &en);				\
	test_ktls_transmit_empty_fragment(&en, seqno);			\
	free_tls_enable(&en);						\
}

#define	ADD_TRANSMIT_EMPTY_FRAGMENT_TEST(cipher_name, cipher_alg,	\
	    key_size, auth_alg)						\
	ATF_TP_ADD_TC(tp, ktls_transmit_##cipher_name##_empty_fragment);

#define	GEN_TRANSMIT_TESTS(cipher_name, cipher_alg, key_size, auth_alg, \
	    minor)							\
	GEN_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, short, 64)					\
	GEN_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, long, 64 * 1024)				\
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, control, 0x21 /* Alert */, 32)

#define	ADD_TRANSMIT_TESTS(cipher_name, cipher_alg, key_size, auth_alg, \
	    minor)							\
	ADD_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, short)					\
	ADD_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, long)					\
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, control)

/*
 * For each supported cipher suite, run three transmit tests:
 *
 * - a short test which sends 64 bytes of application data (likely as
 *   a single TLS record)
 *
 * - a long test which sends 64KB of application data (split across
 *   multiple TLS records)
 *
 * - a control test which sends a single record with a specific
 *   content type via sendmsg()
 */
AES_CBC_TESTS(GEN_TRANSMIT_TESTS);
AES_GCM_TESTS(GEN_TRANSMIT_TESTS);
CHACHA20_TESTS(GEN_TRANSMIT_TESTS);
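/*
 * Illustration only: AES_GCM_TESTS(GEN_TRANSMIT_TESTS) expands (among
 * others) into a test case roughly equivalent to:
 *
 *	ATF_TC_WITHOUT_HEAD(ktls_transmit_aes128_gcm_1_2_short);
 *	ATF_TC_BODY(ktls_transmit_aes128_gcm_1_2_short, tc)
 *	{
 *		struct tls_enable en;
 *		uint64_t seqno;
 *
 *		ATF_REQUIRE_KTLS();
 *		seqno = random();
 *		build_tls_enable(CRYPTO_AES_NIST_GCM_16, 128 / 8, 0,
 *		    TLS_MINOR_VER_TWO, seqno, &en);
 *		test_ktls_transmit_app_data(&en, seqno, 64);
 *		free_tls_enable(&en);
 *	}
 */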
#define	GEN_TRANSMIT_PADDING_TESTS(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor)						\
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, padding_1, 0x21 /* Alert */, 1)		\
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, padding_2, 0x21 /* Alert */, 2)		\
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, padding_3, 0x21 /* Alert */, 3)		\
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, padding_4, 0x21 /* Alert */, 4)		\
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, padding_5, 0x21 /* Alert */, 5)		\
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, padding_6, 0x21 /* Alert */, 6)		\
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, padding_7, 0x21 /* Alert */, 7)		\
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, padding_8, 0x21 /* Alert */, 8)		\
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, padding_9, 0x21 /* Alert */, 9)		\
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, padding_10, 0x21 /* Alert */, 10)		\
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, padding_11, 0x21 /* Alert */, 11)		\
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, padding_12, 0x21 /* Alert */, 12)		\
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, padding_13, 0x21 /* Alert */, 13)		\
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, padding_14, 0x21 /* Alert */, 14)		\
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, padding_15, 0x21 /* Alert */, 15)		\
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, padding_16, 0x21 /* Alert */, 16)

#define	ADD_TRANSMIT_PADDING_TESTS(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor)						\
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, padding_1)					\
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, padding_2)					\
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, padding_3)					\
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, padding_4)					\
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, padding_5)					\
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, padding_6)					\
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, padding_7)					\
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, padding_8)					\
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, padding_9)					\
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, padding_10)				\
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, padding_11)				\
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, padding_12)				\
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, padding_13)				\
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, padding_14)				\
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, padding_15)				\
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, padding_16)

/*
 * For AES-CBC MTE cipher suites using padding, add tests of messages
 * with each possible padding size.  Note that the padding_<N> tests
 * do not necessarily test <N> bytes of padding as the padding is a
 * function of the cipher suite's MAC length.  However, cycling
 * through all of the payload sizes from 1 to 16 should exercise all
 * of the possible padding lengths for each suite.
 */
AES_CBC_TESTS(GEN_TRANSMIT_PADDING_TESTS);

/*
 * Test "empty fragments" which are TLS records with no payload that
 * OpenSSL can send for TLS 1.0 connections.
 */
TLS_10_TESTS(GEN_TRANSMIT_EMPTY_FRAGMENT_TEST);

static void
test_ktls_invalid_transmit_cipher_suite(struct tls_enable *en)
{
	int sockets[2];

	ATF_REQUIRE_MSG(socketpair_tcp(sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_TXTLS_ENABLE, en,
	    sizeof(*en)) == -1);
	ATF_REQUIRE(errno == EINVAL);

	ATF_REQUIRE(close(sockets[1]) == 0);
	ATF_REQUIRE(close(sockets[0]) == 0);
}

#define	GEN_INVALID_TRANSMIT_TEST(name, cipher_alg, key_size, auth_alg, \
	    minor)							\
ATF_TC_WITHOUT_HEAD(ktls_transmit_invalid_##name);			\
ATF_TC_BODY(ktls_transmit_invalid_##name, tc)				\
{									\
	struct tls_enable en;						\
	uint64_t seqno;							\
									\
	ATF_REQUIRE_KTLS();						\
	seqno = random();						\
	build_tls_enable(cipher_alg, key_size, auth_alg, minor, seqno,	\
	    &en);							\
	test_ktls_invalid_transmit_cipher_suite(&en);			\
	free_tls_enable(&en);						\
}

#define	ADD_INVALID_TRANSMIT_TEST(name, cipher_alg, key_size, auth_alg, \
	    minor)							\
	ATF_TP_ADD_TC(tp, ktls_transmit_invalid_##name);

#define	INVALID_CIPHER_SUITES(M)					\
	M(aes128_cbc_1_0_sha256, CRYPTO_AES_CBC, 128 / 8,		\
	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_ZERO)			\
	M(aes128_cbc_1_0_sha384, CRYPTO_AES_CBC, 128 / 8,		\
	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_ZERO)			\
	M(aes128_gcm_1_0, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0,		\
	    TLS_MINOR_VER_ZERO)						\
	M(chacha20_poly1305_1_0, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0,	\
	    TLS_MINOR_VER_ZERO)						\
	M(aes128_cbc_1_1_sha256, CRYPTO_AES_CBC, 128 / 8,		\
	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_ONE)			\
	M(aes128_cbc_1_1_sha384, CRYPTO_AES_CBC, 128 / 8,		\
	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_ONE)			\
	M(aes128_gcm_1_1, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0,		\
	    TLS_MINOR_VER_ONE)						\
	M(chacha20_poly1305_1_1, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0,	\
	    TLS_MINOR_VER_ONE)						\
	M(aes128_cbc_1_3_sha1, CRYPTO_AES_CBC, 128 / 8,			\
	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_THREE)			\
	M(aes128_cbc_1_3_sha256, CRYPTO_AES_CBC, 128 / 8,		\
	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_THREE)			\
	M(aes128_cbc_1_3_sha384, CRYPTO_AES_CBC, 128 / 8,		\
	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_THREE)

/*
 * Ensure that invalid cipher suites are rejected for transmit.
 */
INVALID_CIPHER_SUITES(GEN_INVALID_TRANSMIT_TEST);

#define	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, name, len, padding)			\
ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_##name);		\
ATF_TC_BODY(ktls_receive_##cipher_name##_##name, tc)			\
{									\
	struct tls_enable en;						\
	uint64_t seqno;							\
									\
	ATF_REQUIRE_KTLS();						\
	seqno = random();						\
	build_tls_enable(cipher_alg, key_size, auth_alg, minor, seqno,	\
	    &en);							\
	test_ktls_receive_app_data(&en, seqno, len, padding);		\
	free_tls_enable(&en);						\
}

#define	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, name)					\
	ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_##name);

#define	GEN_RECEIVE_TESTS(cipher_name, cipher_alg, key_size, auth_alg,	\
	    minor)							\
	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, short, 64, 0)				\
	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, long, 64 * 1024, 0)

#define	ADD_RECEIVE_TESTS(cipher_name, cipher_alg, key_size, auth_alg,	\
	    minor)							\
	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, short)					\
	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, long)

/*
 * For each supported cipher suite, run two receive tests:
 *
 * - a short test which sends 64 bytes of application data (likely as
 *   a single TLS record)
 *
 * - a long test which sends 64KB of application data (split across
 *   multiple TLS records)
 *
 * Note that receive is currently only supported for AEAD cipher
 * suites.
 */
AES_GCM_TESTS(GEN_RECEIVE_TESTS);
CHACHA20_TESTS(GEN_RECEIVE_TESTS);

#define	GEN_PADDING_RECEIVE_TESTS(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor)						\
	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, short_padded, 64, 16)			\
	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, long_padded, 64 * 1024, 15)

#define	ADD_PADDING_RECEIVE_TESTS(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor)						\
	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, short_padded)				\
	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size,	\
	    auth_alg, minor, long_padded)

/*
 * For TLS 1.3 cipher suites, run two additional receive tests which
 * add padding to each record.
 */
TLS_13_TESTS(GEN_PADDING_RECEIVE_TESTS);

static void
test_ktls_invalid_receive_cipher_suite(struct tls_enable *en)
{
	int sockets[2];

	ATF_REQUIRE_MSG(socketpair_tcp(sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
	    sizeof(*en)) == -1);
	ATF_REQUIRE(errno == EINVAL);

	ATF_REQUIRE(close(sockets[1]) == 0);
	ATF_REQUIRE(close(sockets[0]) == 0);
}

#define	GEN_INVALID_RECEIVE_TEST(name, cipher_alg, key_size, auth_alg,	\
	    minor)							\
ATF_TC_WITHOUT_HEAD(ktls_receive_invalid_##name);			\
ATF_TC_BODY(ktls_receive_invalid_##name, tc)				\
{									\
	struct tls_enable en;						\
	uint64_t seqno;							\
									\
	ATF_REQUIRE_KTLS();						\
	seqno = random();						\
	build_tls_enable(cipher_alg, key_size, auth_alg, minor, seqno,	\
	    &en);							\
	test_ktls_invalid_receive_cipher_suite(&en);			\
	free_tls_enable(&en);						\
}

#define	ADD_INVALID_RECEIVE_TEST(name, cipher_alg, key_size, auth_alg,	\
	    minor)							\
	ATF_TP_ADD_TC(tp, ktls_receive_invalid_##name);

/*
 * Ensure that invalid cipher suites are rejected for receive.
 */
INVALID_CIPHER_SUITES(GEN_INVALID_RECEIVE_TEST);

static void
test_ktls_unsupported_receive_cipher_suite(struct tls_enable *en)
{
	int sockets[2];

	ATF_REQUIRE_MSG(socketpair_tcp(sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
	    sizeof(*en)) == -1);
	ATF_REQUIRE(errno == EPROTONOSUPPORT);

	ATF_REQUIRE(close(sockets[1]) == 0);
	ATF_REQUIRE(close(sockets[0]) == 0);
}

#define	GEN_UNSUPPORTED_RECEIVE_TEST(name, cipher_alg, key_size,	\
	    auth_alg, minor)						\
ATF_TC_WITHOUT_HEAD(ktls_receive_unsupported_##name);			\
ATF_TC_BODY(ktls_receive_unsupported_##name, tc)			\
{									\
	struct tls_enable en;						\
	uint64_t seqno;							\
									\
	ATF_REQUIRE_KTLS();						\
	seqno = random();						\
	build_tls_enable(cipher_alg, key_size, auth_alg, minor, seqno,	\
	    &en);							\
	test_ktls_unsupported_receive_cipher_suite(&en);		\
	free_tls_enable(&en);						\
}

#define	ADD_UNSUPPORTED_RECEIVE_TEST(name, cipher_alg, key_size,	\
	    auth_alg, minor)						\
	ATF_TP_ADD_TC(tp, ktls_receive_unsupported_##name);

/*
 * Ensure that valid cipher suites not supported for receive are
 * rejected.
 */
AES_CBC_TESTS(GEN_UNSUPPORTED_RECEIVE_TEST);

/*
 * Try to perform an invalid sendto(2) on a TXTLS-enabled socket, to exercise
 * KTLS error handling in the socket layer.
 */
ATF_TC_WITHOUT_HEAD(ktls_sendto_baddst);
ATF_TC_BODY(ktls_sendto_baddst, tc)
{
	char buf[32];
	struct sockaddr_in dst;
	struct tls_enable en;
	ssize_t n;
	int s;

	ATF_REQUIRE_KTLS();

	s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
	ATF_REQUIRE(s >= 0);

	build_tls_enable(CRYPTO_AES_NIST_GCM_16, 128 / 8, 0,
	    TLS_MINOR_VER_THREE, (uint64_t)random(), &en);

	ATF_REQUIRE(setsockopt(s, IPPROTO_TCP, TCP_TXTLS_ENABLE, &en,
	    sizeof(en)) == 0);

	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_len = sizeof(dst);
	dst.sin_addr.s_addr = htonl(INADDR_BROADCAST);
	dst.sin_port = htons(12345);

	memset(buf, 0, sizeof(buf));
	n = sendto(s, buf, sizeof(buf), 0, (struct sockaddr *)&dst,
	    sizeof(dst));

	/* Can't transmit to the broadcast address over TCP. */
	ATF_REQUIRE_ERRNO(EACCES, n == -1);
	ATF_REQUIRE(close(s) == 0);
}

ATF_TP_ADD_TCS(tp)
{
	/* Transmit tests */
	AES_CBC_TESTS(ADD_TRANSMIT_TESTS);
	AES_GCM_TESTS(ADD_TRANSMIT_TESTS);
	CHACHA20_TESTS(ADD_TRANSMIT_TESTS);
	AES_CBC_TESTS(ADD_TRANSMIT_PADDING_TESTS);
	TLS_10_TESTS(ADD_TRANSMIT_EMPTY_FRAGMENT_TEST);
	INVALID_CIPHER_SUITES(ADD_INVALID_TRANSMIT_TEST);

	/* Receive tests */
	AES_CBC_TESTS(ADD_UNSUPPORTED_RECEIVE_TEST);
	AES_GCM_TESTS(ADD_RECEIVE_TESTS);
	CHACHA20_TESTS(ADD_RECEIVE_TESTS);
	TLS_13_TESTS(ADD_PADDING_RECEIVE_TESTS);
	INVALID_CIPHER_SUITES(ADD_INVALID_RECEIVE_TEST);

	/* Miscellaneous */
	ATF_TP_ADD_TC(tp, ktls_sendto_baddst);

	return (atf_no_error());
}