1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2021 Netflix Inc. 5 * Written by: John Baldwin <jhb@FreeBSD.org> 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 
27 */ 28 29 #include <sys/types.h> 30 #include <sys/endian.h> 31 #include <sys/event.h> 32 #include <sys/ktls.h> 33 #include <sys/socket.h> 34 #include <sys/sysctl.h> 35 #include <netinet/in.h> 36 #include <netinet/tcp.h> 37 #include <crypto/cryptodev.h> 38 #include <assert.h> 39 #include <err.h> 40 #include <fcntl.h> 41 #include <netdb.h> 42 #include <poll.h> 43 #include <stdbool.h> 44 #include <stdlib.h> 45 #include <atf-c.h> 46 47 #include <openssl/err.h> 48 #include <openssl/evp.h> 49 #include <openssl/hmac.h> 50 51 static void 52 require_ktls(void) 53 { 54 size_t len; 55 bool enable; 56 57 len = sizeof(enable); 58 if (sysctlbyname("kern.ipc.tls.enable", &enable, &len, NULL, 0) == -1) { 59 if (errno == ENOENT) 60 atf_tc_skip("kernel does not support TLS offload"); 61 atf_libc_error(errno, "Failed to read kern.ipc.tls.enable"); 62 } 63 64 if (!enable) 65 atf_tc_skip("Kernel TLS is disabled"); 66 } 67 68 #define ATF_REQUIRE_KTLS() require_ktls() 69 70 static void 71 check_tls_mode(const atf_tc_t *tc, int s, int sockopt) 72 { 73 if (atf_tc_get_config_var_as_bool_wd(tc, "ktls.require_ifnet", false)) { 74 socklen_t len; 75 int mode; 76 77 len = sizeof(mode); 78 if (getsockopt(s, IPPROTO_TCP, sockopt, &mode, &len) == -1) 79 atf_libc_error(errno, "Failed to fetch TLS mode"); 80 81 if (mode != TCP_TLS_MODE_IFNET) 82 atf_tc_skip("connection did not use ifnet TLS"); 83 } 84 85 if (atf_tc_get_config_var_as_bool_wd(tc, "ktls.require_toe", false)) { 86 socklen_t len; 87 int mode; 88 89 len = sizeof(mode); 90 if (getsockopt(s, IPPROTO_TCP, sockopt, &mode, &len) == -1) 91 atf_libc_error(errno, "Failed to fetch TLS mode"); 92 93 if (mode != TCP_TLS_MODE_TOE) 94 atf_tc_skip("connection did not use TOE TLS"); 95 } 96 } 97 98 static char 99 rdigit(void) 100 { 101 /* ASCII printable values between 0x20 and 0x7e */ 102 return (0x20 + random() % (0x7f - 0x20)); 103 } 104 105 static char * 106 alloc_buffer(size_t len) 107 { 108 char *buf; 109 size_t i; 110 111 if (len == 0) 112 
return (NULL); 113 buf = malloc(len); 114 for (i = 0; i < len; i++) 115 buf[i] = rdigit(); 116 return (buf); 117 } 118 119 static bool 120 socketpair_tcp(int sv[2]) 121 { 122 struct pollfd pfd; 123 struct sockaddr_in sin; 124 socklen_t len; 125 int as, cs, ls; 126 127 ls = socket(PF_INET, SOCK_STREAM, 0); 128 if (ls == -1) { 129 warn("socket() for listen"); 130 return (false); 131 } 132 133 memset(&sin, 0, sizeof(sin)); 134 sin.sin_len = sizeof(sin); 135 sin.sin_family = AF_INET; 136 sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK); 137 if (bind(ls, (struct sockaddr *)&sin, sizeof(sin)) == -1) { 138 warn("bind"); 139 close(ls); 140 return (false); 141 } 142 143 if (listen(ls, 1) == -1) { 144 warn("listen"); 145 close(ls); 146 return (false); 147 } 148 149 len = sizeof(sin); 150 if (getsockname(ls, (struct sockaddr *)&sin, &len) == -1) { 151 warn("getsockname"); 152 close(ls); 153 return (false); 154 } 155 156 cs = socket(PF_INET, SOCK_STREAM | SOCK_NONBLOCK, 0); 157 if (cs == -1) { 158 warn("socket() for connect"); 159 close(ls); 160 return (false); 161 } 162 163 if (connect(cs, (struct sockaddr *)&sin, sizeof(sin)) == -1) { 164 if (errno != EINPROGRESS) { 165 warn("connect"); 166 close(ls); 167 close(cs); 168 return (false); 169 } 170 } 171 172 as = accept4(ls, NULL, NULL, SOCK_NONBLOCK); 173 if (as == -1) { 174 warn("accept4"); 175 close(ls); 176 close(cs); 177 return (false); 178 } 179 180 close(ls); 181 182 pfd.fd = cs; 183 pfd.events = POLLOUT; 184 pfd.revents = 0; 185 ATF_REQUIRE(poll(&pfd, 1, INFTIM) == 1); 186 ATF_REQUIRE(pfd.revents == POLLOUT); 187 188 sv[0] = cs; 189 sv[1] = as; 190 return (true); 191 } 192 193 static bool 194 echo_socket(const atf_tc_t *tc, int sv[2]) 195 { 196 const char *cause, *host, *port; 197 struct addrinfo hints, *ai, *tofree; 198 int error, flags, s; 199 200 host = atf_tc_get_config_var(tc, "ktls.host"); 201 port = atf_tc_get_config_var_wd(tc, "ktls.port", "echo"); 202 memset(&hints, 0, sizeof(hints)); 203 hints.ai_family = 
AF_UNSPEC; 204 hints.ai_socktype = SOCK_STREAM; 205 hints.ai_protocol = IPPROTO_TCP; 206 error = getaddrinfo(host, port, &hints, &tofree); 207 if (error != 0) { 208 warnx("getaddrinfo(%s:%s) failed: %s", host, port, 209 gai_strerror(error)); 210 return (false); 211 } 212 213 cause = NULL; 214 for (ai = tofree; ai != NULL; ai = ai->ai_next) { 215 s = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol); 216 if (s == -1) { 217 cause = "socket"; 218 error = errno; 219 continue; 220 } 221 222 if (connect(s, ai->ai_addr, ai->ai_addrlen) == -1) { 223 cause = "connect"; 224 error = errno; 225 close(s); 226 continue; 227 } 228 229 freeaddrinfo(tofree); 230 231 ATF_REQUIRE((flags = fcntl(s, F_GETFL)) != -1); 232 flags |= O_NONBLOCK; 233 ATF_REQUIRE(fcntl(s, F_SETFL, flags) != -1); 234 235 sv[0] = s; 236 sv[1] = s; 237 return (true); 238 } 239 240 warnc(error, "%s", cause); 241 freeaddrinfo(tofree); 242 return (false); 243 } 244 245 static bool 246 open_sockets(const atf_tc_t *tc, int sv[2]) 247 { 248 if (atf_tc_has_config_var(tc, "ktls.host")) 249 return (echo_socket(tc, sv)); 250 else 251 return (socketpair_tcp(sv)); 252 } 253 254 static void 255 close_sockets(int sv[2]) 256 { 257 if (sv[0] != sv[1]) 258 ATF_REQUIRE(close(sv[1]) == 0); 259 ATF_REQUIRE(close(sv[0]) == 0); 260 } 261 262 static void 263 fd_set_blocking(int fd) 264 { 265 int flags; 266 267 ATF_REQUIRE((flags = fcntl(fd, F_GETFL)) != -1); 268 flags &= ~O_NONBLOCK; 269 ATF_REQUIRE(fcntl(fd, F_SETFL, flags) != -1); 270 } 271 272 static bool 273 cbc_crypt(const EVP_CIPHER *cipher, const char *key, const char *iv, 274 const char *input, char *output, size_t size, int enc) 275 { 276 EVP_CIPHER_CTX *ctx; 277 int outl, total; 278 279 ctx = EVP_CIPHER_CTX_new(); 280 if (ctx == NULL) { 281 warnx("EVP_CIPHER_CTX_new failed: %s", 282 ERR_error_string(ERR_get_error(), NULL)); 283 return (false); 284 } 285 if (EVP_CipherInit_ex(ctx, cipher, NULL, (const u_char *)key, 286 (const u_char *)iv, enc) != 1) { 287 
warnx("EVP_CipherInit_ex failed: %s", 288 ERR_error_string(ERR_get_error(), NULL)); 289 EVP_CIPHER_CTX_free(ctx); 290 return (false); 291 } 292 EVP_CIPHER_CTX_set_padding(ctx, 0); 293 if (EVP_CipherUpdate(ctx, (u_char *)output, &outl, 294 (const u_char *)input, size) != 1) { 295 warnx("EVP_CipherUpdate failed: %s", 296 ERR_error_string(ERR_get_error(), NULL)); 297 EVP_CIPHER_CTX_free(ctx); 298 return (false); 299 } 300 total = outl; 301 if (EVP_CipherFinal_ex(ctx, (u_char *)output + outl, &outl) != 1) { 302 warnx("EVP_CipherFinal_ex failed: %s", 303 ERR_error_string(ERR_get_error(), NULL)); 304 EVP_CIPHER_CTX_free(ctx); 305 return (false); 306 } 307 total += outl; 308 if ((size_t)total != size) { 309 warnx("decrypt size mismatch: %zu vs %d", size, total); 310 EVP_CIPHER_CTX_free(ctx); 311 return (false); 312 } 313 EVP_CIPHER_CTX_free(ctx); 314 return (true); 315 } 316 317 static bool 318 cbc_encrypt(const EVP_CIPHER *cipher, const char *key, const char *iv, 319 const char *input, char *output, size_t size) 320 { 321 return (cbc_crypt(cipher, key, iv, input, output, size, 1)); 322 } 323 324 static bool 325 cbc_decrypt(const EVP_CIPHER *cipher, const char *key, const char *iv, 326 const char *input, char *output, size_t size) 327 { 328 return (cbc_crypt(cipher, key, iv, input, output, size, 0)); 329 } 330 331 static bool 332 compute_hash(const EVP_MD *md, const void *key, size_t key_len, const void *aad, 333 size_t aad_len, const void *buffer, size_t len, void *digest, 334 u_int *digest_len) 335 { 336 HMAC_CTX *ctx; 337 338 ctx = HMAC_CTX_new(); 339 if (ctx == NULL) { 340 warnx("HMAC_CTX_new failed: %s", 341 ERR_error_string(ERR_get_error(), NULL)); 342 return (false); 343 } 344 if (HMAC_Init_ex(ctx, key, key_len, md, NULL) != 1) { 345 warnx("HMAC_Init_ex failed: %s", 346 ERR_error_string(ERR_get_error(), NULL)); 347 HMAC_CTX_free(ctx); 348 return (false); 349 } 350 if (HMAC_Update(ctx, aad, aad_len) != 1) { 351 warnx("HMAC_Update (aad) failed: %s", 352 
ERR_error_string(ERR_get_error(), NULL)); 353 HMAC_CTX_free(ctx); 354 return (false); 355 } 356 if (HMAC_Update(ctx, buffer, len) != 1) { 357 warnx("HMAC_Update (payload) failed: %s", 358 ERR_error_string(ERR_get_error(), NULL)); 359 HMAC_CTX_free(ctx); 360 return (false); 361 } 362 if (HMAC_Final(ctx, digest, digest_len) != 1) { 363 warnx("HMAC_Final failed: %s", 364 ERR_error_string(ERR_get_error(), NULL)); 365 HMAC_CTX_free(ctx); 366 return (false); 367 } 368 HMAC_CTX_free(ctx); 369 return (true); 370 } 371 372 static bool 373 verify_hash(const EVP_MD *md, const void *key, size_t key_len, const void *aad, 374 size_t aad_len, const void *buffer, size_t len, const void *digest) 375 { 376 unsigned char digest2[EVP_MAX_MD_SIZE]; 377 u_int digest_len; 378 379 if (!compute_hash(md, key, key_len, aad, aad_len, buffer, len, digest2, 380 &digest_len)) 381 return (false); 382 if (memcmp(digest, digest2, digest_len) != 0) { 383 warnx("HMAC mismatch"); 384 return (false); 385 } 386 return (true); 387 } 388 389 static bool 390 aead_encrypt(const EVP_CIPHER *cipher, const char *key, const char *nonce, 391 const void *aad, size_t aad_len, const char *input, char *output, 392 size_t size, char *tag, size_t tag_len) 393 { 394 EVP_CIPHER_CTX *ctx; 395 int outl, total; 396 397 ctx = EVP_CIPHER_CTX_new(); 398 if (ctx == NULL) { 399 warnx("EVP_CIPHER_CTX_new failed: %s", 400 ERR_error_string(ERR_get_error(), NULL)); 401 return (false); 402 } 403 if (EVP_EncryptInit_ex(ctx, cipher, NULL, (const u_char *)key, 404 (const u_char *)nonce) != 1) { 405 warnx("EVP_EncryptInit_ex failed: %s", 406 ERR_error_string(ERR_get_error(), NULL)); 407 EVP_CIPHER_CTX_free(ctx); 408 return (false); 409 } 410 EVP_CIPHER_CTX_set_padding(ctx, 0); 411 if (aad != NULL) { 412 if (EVP_EncryptUpdate(ctx, NULL, &outl, (const u_char *)aad, 413 aad_len) != 1) { 414 warnx("EVP_EncryptUpdate for AAD failed: %s", 415 ERR_error_string(ERR_get_error(), NULL)); 416 EVP_CIPHER_CTX_free(ctx); 417 return (false); 418 } 419 
} 420 if (EVP_EncryptUpdate(ctx, (u_char *)output, &outl, 421 (const u_char *)input, size) != 1) { 422 warnx("EVP_EncryptUpdate failed: %s", 423 ERR_error_string(ERR_get_error(), NULL)); 424 EVP_CIPHER_CTX_free(ctx); 425 return (false); 426 } 427 total = outl; 428 if (EVP_EncryptFinal_ex(ctx, (u_char *)output + outl, &outl) != 1) { 429 warnx("EVP_EncryptFinal_ex failed: %s", 430 ERR_error_string(ERR_get_error(), NULL)); 431 EVP_CIPHER_CTX_free(ctx); 432 return (false); 433 } 434 total += outl; 435 if ((size_t)total != size) { 436 warnx("encrypt size mismatch: %zu vs %d", size, total); 437 EVP_CIPHER_CTX_free(ctx); 438 return (false); 439 } 440 if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_GET_TAG, tag_len, tag) != 441 1) { 442 warnx("EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_GET_TAG) failed: %s", 443 ERR_error_string(ERR_get_error(), NULL)); 444 EVP_CIPHER_CTX_free(ctx); 445 return (false); 446 } 447 EVP_CIPHER_CTX_free(ctx); 448 return (true); 449 } 450 451 static bool 452 aead_decrypt(const EVP_CIPHER *cipher, const char *key, const char *nonce, 453 const void *aad, size_t aad_len, const char *input, char *output, 454 size_t size, const char *tag, size_t tag_len) 455 { 456 EVP_CIPHER_CTX *ctx; 457 int outl, total; 458 bool valid; 459 460 ctx = EVP_CIPHER_CTX_new(); 461 if (ctx == NULL) { 462 warnx("EVP_CIPHER_CTX_new failed: %s", 463 ERR_error_string(ERR_get_error(), NULL)); 464 return (false); 465 } 466 if (EVP_DecryptInit_ex(ctx, cipher, NULL, (const u_char *)key, 467 (const u_char *)nonce) != 1) { 468 warnx("EVP_DecryptInit_ex failed: %s", 469 ERR_error_string(ERR_get_error(), NULL)); 470 EVP_CIPHER_CTX_free(ctx); 471 return (false); 472 } 473 EVP_CIPHER_CTX_set_padding(ctx, 0); 474 if (aad != NULL) { 475 if (EVP_DecryptUpdate(ctx, NULL, &outl, (const u_char *)aad, 476 aad_len) != 1) { 477 warnx("EVP_DecryptUpdate for AAD failed: %s", 478 ERR_error_string(ERR_get_error(), NULL)); 479 EVP_CIPHER_CTX_free(ctx); 480 return (false); 481 } 482 } 483 if (EVP_DecryptUpdate(ctx, 
(u_char *)output, &outl, 484 (const u_char *)input, size) != 1) { 485 warnx("EVP_DecryptUpdate failed: %s", 486 ERR_error_string(ERR_get_error(), NULL)); 487 EVP_CIPHER_CTX_free(ctx); 488 return (false); 489 } 490 total = outl; 491 if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_AEAD_SET_TAG, tag_len, 492 __DECONST(char *, tag)) != 1) { 493 warnx("EVP_CIPHER_CTX_ctrl(EVP_CTRL_AEAD_SET_TAG) failed: %s", 494 ERR_error_string(ERR_get_error(), NULL)); 495 EVP_CIPHER_CTX_free(ctx); 496 return (false); 497 } 498 valid = (EVP_DecryptFinal_ex(ctx, (u_char *)output + outl, &outl) == 1); 499 total += outl; 500 if ((size_t)total != size) { 501 warnx("decrypt size mismatch: %zu vs %d", size, total); 502 EVP_CIPHER_CTX_free(ctx); 503 return (false); 504 } 505 if (!valid) 506 warnx("tag mismatch"); 507 EVP_CIPHER_CTX_free(ctx); 508 return (valid); 509 } 510 511 static void 512 build_tls_enable(int cipher_alg, size_t cipher_key_len, int auth_alg, 513 int minor, uint64_t seqno, struct tls_enable *en) 514 { 515 u_int auth_key_len, iv_len; 516 517 memset(en, 0, sizeof(*en)); 518 519 switch (cipher_alg) { 520 case CRYPTO_AES_CBC: 521 if (minor == TLS_MINOR_VER_ZERO) 522 iv_len = AES_BLOCK_LEN; 523 else 524 iv_len = 0; 525 break; 526 case CRYPTO_AES_NIST_GCM_16: 527 if (minor == TLS_MINOR_VER_TWO) 528 iv_len = TLS_AEAD_GCM_LEN; 529 else 530 iv_len = TLS_1_3_GCM_IV_LEN; 531 break; 532 case CRYPTO_CHACHA20_POLY1305: 533 iv_len = TLS_CHACHA20_IV_LEN; 534 break; 535 default: 536 iv_len = 0; 537 break; 538 } 539 switch (auth_alg) { 540 case CRYPTO_SHA1_HMAC: 541 auth_key_len = SHA1_HASH_LEN; 542 break; 543 case CRYPTO_SHA2_256_HMAC: 544 auth_key_len = SHA2_256_HASH_LEN; 545 break; 546 case CRYPTO_SHA2_384_HMAC: 547 auth_key_len = SHA2_384_HASH_LEN; 548 break; 549 default: 550 auth_key_len = 0; 551 break; 552 } 553 en->cipher_key = alloc_buffer(cipher_key_len); 554 en->iv = alloc_buffer(iv_len); 555 en->auth_key = alloc_buffer(auth_key_len); 556 en->cipher_algorithm = cipher_alg; 557 
en->cipher_key_len = cipher_key_len; 558 en->iv_len = iv_len; 559 en->auth_algorithm = auth_alg; 560 en->auth_key_len = auth_key_len; 561 en->tls_vmajor = TLS_MAJOR_VER_ONE; 562 en->tls_vminor = minor; 563 be64enc(en->rec_seq, seqno); 564 } 565 566 static void 567 free_tls_enable(struct tls_enable *en) 568 { 569 free(__DECONST(void *, en->cipher_key)); 570 free(__DECONST(void *, en->iv)); 571 free(__DECONST(void *, en->auth_key)); 572 } 573 574 static const EVP_CIPHER * 575 tls_EVP_CIPHER(const struct tls_enable *en) 576 { 577 switch (en->cipher_algorithm) { 578 case CRYPTO_AES_CBC: 579 switch (en->cipher_key_len) { 580 case 128 / 8: 581 return (EVP_aes_128_cbc()); 582 case 256 / 8: 583 return (EVP_aes_256_cbc()); 584 default: 585 return (NULL); 586 } 587 break; 588 case CRYPTO_AES_NIST_GCM_16: 589 switch (en->cipher_key_len) { 590 case 128 / 8: 591 return (EVP_aes_128_gcm()); 592 case 256 / 8: 593 return (EVP_aes_256_gcm()); 594 default: 595 return (NULL); 596 } 597 break; 598 case CRYPTO_CHACHA20_POLY1305: 599 return (EVP_chacha20_poly1305()); 600 default: 601 return (NULL); 602 } 603 } 604 605 static const EVP_MD * 606 tls_EVP_MD(const struct tls_enable *en) 607 { 608 switch (en->auth_algorithm) { 609 case CRYPTO_SHA1_HMAC: 610 return (EVP_sha1()); 611 case CRYPTO_SHA2_256_HMAC: 612 return (EVP_sha256()); 613 case CRYPTO_SHA2_384_HMAC: 614 return (EVP_sha384()); 615 default: 616 return (NULL); 617 } 618 } 619 620 static size_t 621 tls_header_len(struct tls_enable *en) 622 { 623 size_t len; 624 625 len = sizeof(struct tls_record_layer); 626 switch (en->cipher_algorithm) { 627 case CRYPTO_AES_CBC: 628 if (en->tls_vminor != TLS_MINOR_VER_ZERO) 629 len += AES_BLOCK_LEN; 630 return (len); 631 case CRYPTO_AES_NIST_GCM_16: 632 if (en->tls_vminor == TLS_MINOR_VER_TWO) 633 len += sizeof(uint64_t); 634 return (len); 635 case CRYPTO_CHACHA20_POLY1305: 636 return (len); 637 default: 638 return (0); 639 } 640 } 641 642 static size_t 643 tls_mac_len(struct tls_enable *en) 644 
{ 645 switch (en->cipher_algorithm) { 646 case CRYPTO_AES_CBC: 647 switch (en->auth_algorithm) { 648 case CRYPTO_SHA1_HMAC: 649 return (SHA1_HASH_LEN); 650 case CRYPTO_SHA2_256_HMAC: 651 return (SHA2_256_HASH_LEN); 652 case CRYPTO_SHA2_384_HMAC: 653 return (SHA2_384_HASH_LEN); 654 default: 655 return (0); 656 } 657 case CRYPTO_AES_NIST_GCM_16: 658 return (AES_GMAC_HASH_LEN); 659 case CRYPTO_CHACHA20_POLY1305: 660 return (POLY1305_HASH_LEN); 661 default: 662 return (0); 663 } 664 } 665 666 /* Includes maximum padding for MTE. */ 667 static size_t 668 tls_trailer_len(struct tls_enable *en) 669 { 670 size_t len; 671 672 len = tls_mac_len(en); 673 if (en->cipher_algorithm == CRYPTO_AES_CBC) 674 len += AES_BLOCK_LEN; 675 if (en->tls_vminor == TLS_MINOR_VER_THREE) 676 len++; 677 return (len); 678 } 679 680 /* 'len' is the length of the payload application data. */ 681 static void 682 tls_mte_aad(struct tls_enable *en, size_t len, 683 const struct tls_record_layer *hdr, uint64_t seqno, struct tls_mac_data *ad) 684 { 685 ad->seq = htobe64(seqno); 686 ad->type = hdr->tls_type; 687 ad->tls_vmajor = hdr->tls_vmajor; 688 ad->tls_vminor = hdr->tls_vminor; 689 ad->tls_length = htons(len); 690 } 691 692 static void 693 tls_12_aead_aad(struct tls_enable *en, size_t len, 694 const struct tls_record_layer *hdr, uint64_t seqno, 695 struct tls_aead_data *ad) 696 { 697 ad->seq = htobe64(seqno); 698 ad->type = hdr->tls_type; 699 ad->tls_vmajor = hdr->tls_vmajor; 700 ad->tls_vminor = hdr->tls_vminor; 701 ad->tls_length = htons(len); 702 } 703 704 static void 705 tls_13_aad(struct tls_enable *en, const struct tls_record_layer *hdr, 706 uint64_t seqno, struct tls_aead_data_13 *ad) 707 { 708 ad->type = hdr->tls_type; 709 ad->tls_vmajor = hdr->tls_vmajor; 710 ad->tls_vminor = hdr->tls_vminor; 711 ad->tls_length = hdr->tls_length; 712 } 713 714 static void 715 tls_12_gcm_nonce(struct tls_enable *en, const struct tls_record_layer *hdr, 716 char *nonce) 717 { 718 memcpy(nonce, en->iv, 
TLS_AEAD_GCM_LEN); 719 memcpy(nonce + TLS_AEAD_GCM_LEN, hdr + 1, sizeof(uint64_t)); 720 } 721 722 static void 723 tls_13_nonce(struct tls_enable *en, uint64_t seqno, char *nonce) 724 { 725 static_assert(TLS_1_3_GCM_IV_LEN == TLS_CHACHA20_IV_LEN, 726 "TLS 1.3 nonce length mismatch"); 727 memcpy(nonce, en->iv, TLS_1_3_GCM_IV_LEN); 728 *(uint64_t *)(nonce + 4) ^= htobe64(seqno); 729 } 730 731 /* 732 * Decrypt a TLS record 'len' bytes long at 'src' and store the result at 733 * 'dst'. If the TLS record header length doesn't match or 'dst' doesn't 734 * have sufficient room ('avail'), fail the test. 735 */ 736 static size_t 737 decrypt_tls_aes_cbc_mte(struct tls_enable *en, uint64_t seqno, const void *src, 738 size_t len, void *dst, size_t avail, uint8_t *record_type) 739 { 740 const struct tls_record_layer *hdr; 741 struct tls_mac_data aad; 742 const char *iv; 743 char *buf; 744 size_t hdr_len, mac_len, payload_len; 745 int padding; 746 747 hdr = src; 748 hdr_len = tls_header_len(en); 749 mac_len = tls_mac_len(en); 750 ATF_REQUIRE(hdr->tls_vmajor == TLS_MAJOR_VER_ONE); 751 ATF_REQUIRE(hdr->tls_vminor == en->tls_vminor); 752 753 /* First, decrypt the outer payload into a temporary buffer. */ 754 payload_len = len - hdr_len; 755 buf = malloc(payload_len); 756 if (en->tls_vminor == TLS_MINOR_VER_ZERO) 757 iv = en->iv; 758 else 759 iv = (void *)(hdr + 1); 760 ATF_REQUIRE(cbc_decrypt(tls_EVP_CIPHER(en), en->cipher_key, iv, 761 (const u_char *)src + hdr_len, buf, payload_len)); 762 763 /* 764 * Copy the last encrypted block to use as the IV for the next 765 * record for TLS 1.0. 766 */ 767 if (en->tls_vminor == TLS_MINOR_VER_ZERO) 768 memcpy(__DECONST(uint8_t *, en->iv), (const u_char *)src + 769 (len - AES_BLOCK_LEN), AES_BLOCK_LEN); 770 771 /* 772 * Verify trailing padding and strip. 773 * 774 * The kernel always generates the smallest amount of padding. 
775 */ 776 padding = buf[payload_len - 1] + 1; 777 ATF_REQUIRE(padding > 0 && padding <= AES_BLOCK_LEN); 778 ATF_REQUIRE(payload_len >= mac_len + padding); 779 payload_len -= padding; 780 781 /* Verify HMAC. */ 782 payload_len -= mac_len; 783 tls_mte_aad(en, payload_len, hdr, seqno, &aad); 784 ATF_REQUIRE(verify_hash(tls_EVP_MD(en), en->auth_key, en->auth_key_len, 785 &aad, sizeof(aad), buf, payload_len, buf + payload_len)); 786 787 ATF_REQUIRE(payload_len <= avail); 788 memcpy(dst, buf, payload_len); 789 *record_type = hdr->tls_type; 790 return (payload_len); 791 } 792 793 static size_t 794 decrypt_tls_12_aead(struct tls_enable *en, uint64_t seqno, const void *src, 795 size_t len, void *dst, uint8_t *record_type) 796 { 797 const struct tls_record_layer *hdr; 798 struct tls_aead_data aad; 799 char nonce[12]; 800 size_t hdr_len, mac_len, payload_len; 801 802 hdr = src; 803 804 hdr_len = tls_header_len(en); 805 mac_len = tls_mac_len(en); 806 payload_len = len - (hdr_len + mac_len); 807 ATF_REQUIRE(hdr->tls_vmajor == TLS_MAJOR_VER_ONE); 808 ATF_REQUIRE(hdr->tls_vminor == TLS_MINOR_VER_TWO); 809 810 tls_12_aead_aad(en, payload_len, hdr, seqno, &aad); 811 if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16) 812 tls_12_gcm_nonce(en, hdr, nonce); 813 else 814 tls_13_nonce(en, seqno, nonce); 815 816 ATF_REQUIRE(aead_decrypt(tls_EVP_CIPHER(en), en->cipher_key, nonce, 817 &aad, sizeof(aad), (const char *)src + hdr_len, dst, payload_len, 818 (const char *)src + hdr_len + payload_len, mac_len)); 819 820 *record_type = hdr->tls_type; 821 return (payload_len); 822 } 823 824 static size_t 825 decrypt_tls_13_aead(struct tls_enable *en, uint64_t seqno, const void *src, 826 size_t len, void *dst, uint8_t *record_type) 827 { 828 const struct tls_record_layer *hdr; 829 struct tls_aead_data_13 aad; 830 char nonce[12]; 831 char *buf; 832 size_t hdr_len, mac_len, payload_len; 833 834 hdr = src; 835 836 hdr_len = tls_header_len(en); 837 mac_len = tls_mac_len(en); 838 payload_len = len - 
(hdr_len + mac_len); 839 ATF_REQUIRE(payload_len >= 1); 840 ATF_REQUIRE(hdr->tls_type == TLS_RLTYPE_APP); 841 ATF_REQUIRE(hdr->tls_vmajor == TLS_MAJOR_VER_ONE); 842 ATF_REQUIRE(hdr->tls_vminor == TLS_MINOR_VER_TWO); 843 844 tls_13_aad(en, hdr, seqno, &aad); 845 tls_13_nonce(en, seqno, nonce); 846 847 /* 848 * Have to use a temporary buffer for the output due to the 849 * record type as the last byte of the trailer. 850 */ 851 buf = malloc(payload_len); 852 853 ATF_REQUIRE(aead_decrypt(tls_EVP_CIPHER(en), en->cipher_key, nonce, 854 &aad, sizeof(aad), (const char *)src + hdr_len, buf, payload_len, 855 (const char *)src + hdr_len + payload_len, mac_len)); 856 857 /* Trim record type. */ 858 *record_type = buf[payload_len - 1]; 859 payload_len--; 860 861 memcpy(dst, buf, payload_len); 862 free(buf); 863 864 return (payload_len); 865 } 866 867 static size_t 868 decrypt_tls_aead(struct tls_enable *en, uint64_t seqno, const void *src, 869 size_t len, void *dst, size_t avail, uint8_t *record_type) 870 { 871 const struct tls_record_layer *hdr; 872 size_t payload_len; 873 874 hdr = src; 875 ATF_REQUIRE(ntohs(hdr->tls_length) + sizeof(*hdr) == len); 876 877 payload_len = len - (tls_header_len(en) + tls_trailer_len(en)); 878 ATF_REQUIRE(payload_len <= avail); 879 880 if (en->tls_vminor == TLS_MINOR_VER_TWO) { 881 ATF_REQUIRE(decrypt_tls_12_aead(en, seqno, src, len, dst, 882 record_type) == payload_len); 883 } else { 884 ATF_REQUIRE(decrypt_tls_13_aead(en, seqno, src, len, dst, 885 record_type) == payload_len); 886 } 887 888 return (payload_len); 889 } 890 891 static size_t 892 decrypt_tls_record(struct tls_enable *en, uint64_t seqno, const void *src, 893 size_t len, void *dst, size_t avail, uint8_t *record_type) 894 { 895 if (en->cipher_algorithm == CRYPTO_AES_CBC) 896 return (decrypt_tls_aes_cbc_mte(en, seqno, src, len, dst, avail, 897 record_type)); 898 else 899 return (decrypt_tls_aead(en, seqno, src, len, dst, avail, 900 record_type)); 901 } 902 903 /* 904 * Encrypt a TLS 
record of type 'record_type' with payload 'len' bytes 905 * long at 'src' and store the result at 'dst'. If 'dst' doesn't have 906 * sufficient room ('avail'), fail the test. 'padding' is the amount 907 * of additional padding to include beyond any amount mandated by the 908 * cipher suite. 909 */ 910 static size_t 911 encrypt_tls_aes_cbc_mte(struct tls_enable *en, uint8_t record_type, 912 uint64_t seqno, const void *src, size_t len, void *dst, size_t avail, 913 size_t padding) 914 { 915 struct tls_record_layer *hdr; 916 struct tls_mac_data aad; 917 char *buf, *iv; 918 size_t hdr_len, mac_len, record_len; 919 u_int digest_len, i; 920 921 ATF_REQUIRE(padding % 16 == 0); 922 923 hdr = dst; 924 buf = dst; 925 926 hdr_len = tls_header_len(en); 927 mac_len = tls_mac_len(en); 928 padding += (AES_BLOCK_LEN - (len + mac_len) % AES_BLOCK_LEN); 929 ATF_REQUIRE(padding > 0 && padding <= 255); 930 931 record_len = hdr_len + len + mac_len + padding; 932 ATF_REQUIRE(record_len <= avail); 933 934 hdr->tls_type = record_type; 935 hdr->tls_vmajor = TLS_MAJOR_VER_ONE; 936 hdr->tls_vminor = en->tls_vminor; 937 hdr->tls_length = htons(record_len - sizeof(*hdr)); 938 iv = (char *)(hdr + 1); 939 for (i = 0; i < AES_BLOCK_LEN; i++) 940 iv[i] = rdigit(); 941 942 /* Copy plaintext to ciphertext region. */ 943 memcpy(buf + hdr_len, src, len); 944 945 /* Compute HMAC. */ 946 tls_mte_aad(en, len, hdr, seqno, &aad); 947 ATF_REQUIRE(compute_hash(tls_EVP_MD(en), en->auth_key, en->auth_key_len, 948 &aad, sizeof(aad), src, len, buf + hdr_len + len, &digest_len)); 949 ATF_REQUIRE(digest_len == mac_len); 950 951 /* Store padding. */ 952 for (i = 0; i < padding; i++) 953 buf[hdr_len + len + mac_len + i] = padding - 1; 954 955 /* Encrypt the record. 
*/ 956 ATF_REQUIRE(cbc_encrypt(tls_EVP_CIPHER(en), en->cipher_key, iv, 957 buf + hdr_len, buf + hdr_len, len + mac_len + padding)); 958 959 return (record_len); 960 } 961 962 static size_t 963 encrypt_tls_12_aead(struct tls_enable *en, uint8_t record_type, uint64_t seqno, 964 const void *src, size_t len, void *dst) 965 { 966 struct tls_record_layer *hdr; 967 struct tls_aead_data aad; 968 char nonce[12]; 969 size_t hdr_len, mac_len, record_len; 970 971 hdr = dst; 972 973 hdr_len = tls_header_len(en); 974 mac_len = tls_mac_len(en); 975 record_len = hdr_len + len + mac_len; 976 977 hdr->tls_type = record_type; 978 hdr->tls_vmajor = TLS_MAJOR_VER_ONE; 979 hdr->tls_vminor = TLS_MINOR_VER_TWO; 980 hdr->tls_length = htons(record_len - sizeof(*hdr)); 981 if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16) 982 memcpy(hdr + 1, &seqno, sizeof(seqno)); 983 984 tls_12_aead_aad(en, len, hdr, seqno, &aad); 985 if (en->cipher_algorithm == CRYPTO_AES_NIST_GCM_16) 986 tls_12_gcm_nonce(en, hdr, nonce); 987 else 988 tls_13_nonce(en, seqno, nonce); 989 990 ATF_REQUIRE(aead_encrypt(tls_EVP_CIPHER(en), en->cipher_key, nonce, 991 &aad, sizeof(aad), src, (char *)dst + hdr_len, len, 992 (char *)dst + hdr_len + len, mac_len)); 993 994 return (record_len); 995 } 996 997 static size_t 998 encrypt_tls_13_aead(struct tls_enable *en, uint8_t record_type, uint64_t seqno, 999 const void *src, size_t len, void *dst, size_t padding) 1000 { 1001 struct tls_record_layer *hdr; 1002 struct tls_aead_data_13 aad; 1003 char nonce[12]; 1004 char *buf; 1005 size_t hdr_len, mac_len, record_len; 1006 1007 hdr = dst; 1008 1009 hdr_len = tls_header_len(en); 1010 mac_len = tls_mac_len(en); 1011 record_len = hdr_len + len + 1 + padding + mac_len; 1012 1013 hdr->tls_type = TLS_RLTYPE_APP; 1014 hdr->tls_vmajor = TLS_MAJOR_VER_ONE; 1015 hdr->tls_vminor = TLS_MINOR_VER_TWO; 1016 hdr->tls_length = htons(record_len - sizeof(*hdr)); 1017 1018 tls_13_aad(en, hdr, seqno, &aad); 1019 tls_13_nonce(en, seqno, nonce); 1020 1021 
	/*
	 * Have to use a temporary buffer for the input so that the record
	 * type can be appended.
	 */
	buf = malloc(len + 1 + padding);
	memcpy(buf, src, len);
	buf[len] = record_type;
	memset(buf + len + 1, 0, padding);

	ATF_REQUIRE(aead_encrypt(tls_EVP_CIPHER(en), en->cipher_key, nonce,
	    &aad, sizeof(aad), buf, (char *)dst + hdr_len, len + 1 + padding,
	    (char *)dst + hdr_len + len + 1 + padding, mac_len));

	free(buf);

	return (record_len);
}

/*
 * Encrypt a single AEAD TLS record, dispatching on the TLS minor
 * version: TLS 1.2 records carry no padding, while TLS 1.3 records
 * may include padding after the inner record type byte.
 */
static size_t
encrypt_tls_aead(struct tls_enable *en, uint8_t record_type, uint64_t seqno,
    const void *src, size_t len, void *dst, size_t avail, size_t padding)
{
	size_t record_len;

	record_len = tls_header_len(en) + len + padding + tls_trailer_len(en);
	ATF_REQUIRE(record_len <= avail);

	if (en->tls_vminor == TLS_MINOR_VER_TWO) {
		ATF_REQUIRE(padding == 0);
		ATF_REQUIRE(encrypt_tls_12_aead(en, record_type, seqno, src,
		    len, dst) == record_len);
	} else
		ATF_REQUIRE(encrypt_tls_13_aead(en, record_type, seqno, src,
		    len, dst, padding) == record_len);

	return (record_len);
}

/*
 * Encrypt a single TLS record, selecting MAC-then-encrypt for AES-CBC
 * cipher suites and AEAD encryption for everything else.
 */
static size_t
encrypt_tls_record(struct tls_enable *en, uint8_t record_type, uint64_t seqno,
    const void *src, size_t len, void *dst, size_t avail, size_t padding)
{
	if (en->cipher_algorithm == CRYPTO_AES_CBC)
		return (encrypt_tls_aes_cbc_mte(en, record_type, seqno, src,
		    len, dst, avail, padding));
	else
		return (encrypt_tls_aead(en, record_type, seqno, src, len,
		    dst, avail, padding));
}

/*
 * Transmit-side application data test: enable TX kTLS on one end of a
 * socket pair, write 'len' bytes of random plaintext, read the
 * resulting TLS records from the peer, decrypt them in userland, and
 * verify the decrypted stream matches what was written.  A kqueue
 * drives the non-blocking read/write loop so large payloads that span
 * multiple records (and partial reads/writes) are exercised.
 */
static void
test_ktls_transmit_app_data(const atf_tc_t *tc, struct tls_enable *en,
    uint64_t seqno, size_t len)
{
	struct kevent ev;
	struct tls_record_layer *hdr;
	char *plaintext, *decrypted, *outbuf;
	size_t decrypted_len, outbuf_len, outbuf_cap, record_len, written;
	ssize_t rv;
	int kq, sockets[2];
	uint8_t record_type;

	plaintext = alloc_buffer(len);
	decrypted = malloc(len);
	/* Large enough for the biggest single record the kernel can send. */
	outbuf_cap = tls_header_len(en) + TLS_MAX_MSG_SIZE_V10_2 +
	    tls_trailer_len(en);
	outbuf = malloc(outbuf_cap);
	hdr = (struct tls_record_layer *)outbuf;

	ATF_REQUIRE((kq = kqueue()) != -1);

	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_TXTLS_ENABLE, en,
	    sizeof(*en)) == 0);
	check_tls_mode(tc, sockets[1], TCP_TXTLS_MODE);

	EV_SET(&ev, sockets[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
	ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, NULL) == 0);
	EV_SET(&ev, sockets[1], EVFILT_WRITE, EV_ADD, 0, 0, NULL);
	ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, NULL) == 0);

	decrypted_len = 0;
	outbuf_len = 0;
	written = 0;

	while (decrypted_len != len) {
		ATF_REQUIRE(kevent(kq, NULL, 0, &ev, 1, NULL) == 1);

		switch (ev.filter) {
		case EVFILT_WRITE:
			/* Try to write any remaining data. */
			rv = write(ev.ident, plaintext + written,
			    len - written);
			ATF_REQUIRE_MSG(rv > 0,
			    "failed to write to socket");
			written += rv;
			if (written == len) {
				/* All queued; stop polling for writability. */
				ev.flags = EV_DISABLE;
				ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0,
				    NULL) == 0);
			}
			break;

		case EVFILT_READ:
			ATF_REQUIRE((ev.flags & EV_EOF) == 0);

			/*
			 * Try to read data for the next TLS record
			 * into outbuf.  Start by reading the header
			 * to determine how much additional data to
			 * read.
			 */
			if (outbuf_len < sizeof(struct tls_record_layer)) {
				rv = read(ev.ident, outbuf + outbuf_len,
				    sizeof(struct tls_record_layer) -
				    outbuf_len);
				ATF_REQUIRE_MSG(rv > 0,
				    "failed to read from socket");
				outbuf_len += rv;
			}

			if (outbuf_len < sizeof(struct tls_record_layer))
				break;

			record_len = sizeof(struct tls_record_layer) +
			    ntohs(hdr->tls_length);
			ATF_REQUIRE(record_len <= outbuf_cap);
			ATF_REQUIRE(record_len > outbuf_len);
			rv = read(ev.ident, outbuf + outbuf_len,
			    record_len - outbuf_len);
			if (rv == -1 && errno == EAGAIN)
				break;
			ATF_REQUIRE_MSG(rv > 0, "failed to read from socket");

			outbuf_len += rv;
			if (outbuf_len == record_len) {
				/* A complete record: decrypt and verify. */
				decrypted_len += decrypt_tls_record(en, seqno,
				    outbuf, outbuf_len,
				    decrypted + decrypted_len,
				    len - decrypted_len, &record_type);
				ATF_REQUIRE(record_type == TLS_RLTYPE_APP);

				seqno++;
				outbuf_len = 0;
			}
			break;
		}
	}

	ATF_REQUIRE_MSG(written == decrypted_len,
	    "read %zu decrypted bytes, but wrote %zu", decrypted_len, written);

	ATF_REQUIRE(memcmp(plaintext, decrypted, len) == 0);

	free(outbuf);
	free(decrypted);
	free(plaintext);

	close_sockets(sockets);
	ATF_REQUIRE(close(kq) == 0);
}

/*
 * Send a TLS record with an explicit record type by passing a
 * TLS_SET_RECORD_TYPE control message to sendmsg(2).
 */
static void
ktls_send_control_message(int fd, uint8_t type, void *data, size_t len)
{
	struct msghdr msg;
	struct cmsghdr *cmsg;
	char cbuf[CMSG_SPACE(sizeof(type))];
	struct iovec iov;

	memset(&msg, 0, sizeof(msg));

	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = IPPROTO_TCP;
	cmsg->cmsg_type = TLS_SET_RECORD_TYPE;
	cmsg->cmsg_len = CMSG_LEN(sizeof(type));
	*(uint8_t *)CMSG_DATA(cmsg) = type;

	iov.iov_base = data;
	iov.iov_len = len;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	ATF_REQUIRE(sendmsg(fd, &msg, 0) == (ssize_t)len);
}

/*
 * Transmit-side control message test: send a single record with a
 * non-application record type via sendmsg(2) and verify the peer
 * receives a record of that type whose decrypted payload matches.
 * Blocking sockets are used since only one record is exchanged.
 */
static void
test_ktls_transmit_control(const atf_tc_t *tc, struct tls_enable *en,
    uint64_t seqno, uint8_t type, size_t len)
{
	struct tls_record_layer *hdr;
	char *plaintext, *decrypted, *outbuf;
	size_t outbuf_cap, payload_len, record_len;
	ssize_t rv;
	int sockets[2];
	uint8_t record_type;

	ATF_REQUIRE(len <= TLS_MAX_MSG_SIZE_V10_2);

	plaintext = alloc_buffer(len);
	decrypted = malloc(len);
	outbuf_cap = tls_header_len(en) + len + tls_trailer_len(en);
	outbuf = malloc(outbuf_cap);
	hdr = (struct tls_record_layer *)outbuf;

	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_TXTLS_ENABLE, en,
	    sizeof(*en)) == 0);
	check_tls_mode(tc, sockets[1], TCP_TXTLS_MODE);

	fd_set_blocking(sockets[0]);
	fd_set_blocking(sockets[1]);

	ktls_send_control_message(sockets[1], type, plaintext, len);

	/*
	 * First read the header to determine how much additional data
	 * to read.
	 */
	rv = read(sockets[0], outbuf, sizeof(struct tls_record_layer));
	ATF_REQUIRE(rv == sizeof(struct tls_record_layer));
	payload_len = ntohs(hdr->tls_length);
	record_len = payload_len + sizeof(struct tls_record_layer);
	ATF_REQUIRE(record_len <= outbuf_cap);
	rv = read(sockets[0], outbuf + sizeof(struct tls_record_layer),
	    payload_len);
	ATF_REQUIRE(rv == (ssize_t)payload_len);

	rv = decrypt_tls_record(en, seqno, outbuf, record_len, decrypted, len,
	    &record_type);

	ATF_REQUIRE_MSG((ssize_t)len == rv,
	    "read %zd decrypted bytes, but wrote %zu", rv, len);
	ATF_REQUIRE(record_type == type);

	ATF_REQUIRE(memcmp(plaintext, decrypted, len) == 0);

	free(outbuf);
	free(decrypted);
	free(plaintext);

	close_sockets(sockets);
}

/*
 * Transmit-side empty fragment test: a zero-length write(2) should
 * produce an empty TLS record only for TLS 1.0 AES-CBC (where OpenSSL
 * uses empty fragments as a CBC countermeasure); all other suites
 * must reject the write with EINVAL.
 */
static void
test_ktls_transmit_empty_fragment(const atf_tc_t *tc, struct tls_enable *en,
    uint64_t seqno)
{
	struct tls_record_layer *hdr;
	char *outbuf;
	size_t outbuf_cap, payload_len, record_len;
	ssize_t rv;
	int sockets[2];
	uint8_t record_type;

	outbuf_cap = tls_header_len(en) + tls_trailer_len(en);
	outbuf = malloc(outbuf_cap);
	hdr = (struct tls_record_layer *)outbuf;

	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_TXTLS_ENABLE, en,
	    sizeof(*en)) == 0);
	check_tls_mode(tc, sockets[1], TCP_TXTLS_MODE);

	fd_set_blocking(sockets[0]);
	fd_set_blocking(sockets[1]);

	/*
	 * A write of zero bytes should send an empty fragment only for
	 * TLS 1.0, otherwise an error should be raised.
	 */
	rv = write(sockets[1], NULL, 0);
	if (rv == 0) {
		ATF_REQUIRE(en->cipher_algorithm == CRYPTO_AES_CBC);
		ATF_REQUIRE(en->tls_vminor == TLS_MINOR_VER_ZERO);
	} else {
		ATF_REQUIRE(rv == -1);
		ATF_REQUIRE(errno == EINVAL);
		goto out;
	}

	/*
	 * First read the header to determine how much additional data
	 * to read.
	 */
	rv = read(sockets[0], outbuf, sizeof(struct tls_record_layer));
	ATF_REQUIRE(rv == sizeof(struct tls_record_layer));
	payload_len = ntohs(hdr->tls_length);
	record_len = payload_len + sizeof(struct tls_record_layer);
	ATF_REQUIRE(record_len <= outbuf_cap);
	rv = read(sockets[0], outbuf + sizeof(struct tls_record_layer),
	    payload_len);
	ATF_REQUIRE(rv == (ssize_t)payload_len);

	rv = decrypt_tls_record(en, seqno, outbuf, record_len, NULL, 0,
	    &record_type);

	ATF_REQUIRE_MSG(rv == 0,
	    "read %zd decrypted bytes for an empty fragment", rv);
	ATF_REQUIRE(record_type == TLS_RLTYPE_APP);

out:
	free(outbuf);

	close_sockets(sockets);
}

/*
 * Receive one decrypted TLS record from an RX kTLS socket via
 * recvmsg(2), validating the TLS_GET_RECORD control message (record
 * type, version, and length) before returning the payload length.
 */
static size_t
ktls_receive_tls_record(struct tls_enable *en, int fd, uint8_t record_type,
    void *data, size_t len)
{
	struct msghdr msg;
	struct cmsghdr *cmsg;
	struct tls_get_record *tgr;
	char cbuf[CMSG_SPACE(sizeof(*tgr))];
	struct iovec iov;
	ssize_t rv;

	memset(&msg, 0, sizeof(msg));

	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	iov.iov_base = data;
	iov.iov_len = len;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	ATF_REQUIRE((rv = recvmsg(fd, &msg, 0)) > 0);

	/* Expect exactly one whole record, with no truncated cmsg. */
	ATF_REQUIRE((msg.msg_flags & (MSG_EOR | MSG_CTRUNC)) == MSG_EOR);

	cmsg = CMSG_FIRSTHDR(&msg);
	ATF_REQUIRE(cmsg != NULL);
	ATF_REQUIRE(cmsg->cmsg_level == IPPROTO_TCP);
	ATF_REQUIRE(cmsg->cmsg_type == TLS_GET_RECORD);
	ATF_REQUIRE(cmsg->cmsg_len == CMSG_LEN(sizeof(*tgr)));

	tgr = (struct tls_get_record *)CMSG_DATA(cmsg);
	ATF_REQUIRE(tgr->tls_type == record_type);
	ATF_REQUIRE(tgr->tls_vmajor == en->tls_vmajor);
	/* XXX: Not sure if this is what OpenSSL expects? */
	if (en->tls_vminor == TLS_MINOR_VER_THREE)
		ATF_REQUIRE(tgr->tls_vminor == TLS_MINOR_VER_TWO);
	else
		ATF_REQUIRE(tgr->tls_vminor == en->tls_vminor);
	ATF_REQUIRE(tgr->tls_length == htons(rv));

	return (rv);
}

/*
 * Receive-side application data test: enable RX kTLS on one end of a
 * socket pair, encrypt 'len' bytes of random plaintext into TLS
 * records in userland (optionally padded, for TLS 1.3), write them to
 * the peer, receive the decrypted stream via recvmsg(2), and verify
 * it matches the plaintext.  A kqueue drives the non-blocking loop.
 */
static void
test_ktls_receive_app_data(const atf_tc_t *tc, struct tls_enable *en,
    uint64_t seqno, size_t len, size_t padding)
{
	struct kevent ev;
	char *plaintext, *received, *outbuf;
	size_t outbuf_cap, outbuf_len, outbuf_sent, received_len, todo, written;
	ssize_t rv;
	int kq, sockets[2];

	plaintext = alloc_buffer(len);
	received = malloc(len);
	outbuf_cap = tls_header_len(en) + TLS_MAX_MSG_SIZE_V10_2 +
	    tls_trailer_len(en);
	outbuf = malloc(outbuf_cap);

	ATF_REQUIRE((kq = kqueue()) != -1);

	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[0], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
	    sizeof(*en)) == 0);
	check_tls_mode(tc, sockets[0], TCP_RXTLS_MODE);

	EV_SET(&ev, sockets[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
	ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, NULL) == 0);
	EV_SET(&ev, sockets[1], EVFILT_WRITE, EV_ADD, 0, 0, NULL);
	ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0, NULL) == 0);

	received_len = 0;
	outbuf_len = 0;
	written = 0;

	while (received_len != len) {
		ATF_REQUIRE(kevent(kq, NULL, 0, &ev, 1, NULL) == 1);

		switch (ev.filter) {
		case EVFILT_WRITE:
			/*
			 * Compose the next TLS record to send.
			 */
			if (outbuf_len == 0) {
				ATF_REQUIRE(written < len);
				todo = len - written;
				if (todo > TLS_MAX_MSG_SIZE_V10_2 - padding)
					todo = TLS_MAX_MSG_SIZE_V10_2 - padding;
				outbuf_len = encrypt_tls_record(en,
				    TLS_RLTYPE_APP, seqno, plaintext + written,
				    todo, outbuf, outbuf_cap, padding);
				outbuf_sent = 0;
				written += todo;
				seqno++;
			}

			/*
			 * Try to write the remainder of the current
			 * TLS record.
			 */
			rv = write(ev.ident, outbuf + outbuf_sent,
			    outbuf_len - outbuf_sent);
			ATF_REQUIRE_MSG(rv > 0,
			    "failed to write to socket");
			outbuf_sent += rv;
			if (outbuf_sent == outbuf_len) {
				outbuf_len = 0;
				if (written == len) {
					/* Done sending; stop write polling. */
					ev.flags = EV_DISABLE;
					ATF_REQUIRE(kevent(kq, &ev, 1, NULL, 0,
					    NULL) == 0);
				}
			}
			break;

		case EVFILT_READ:
			ATF_REQUIRE((ev.flags & EV_EOF) == 0);

			rv = ktls_receive_tls_record(en, ev.ident,
			    TLS_RLTYPE_APP, received + received_len,
			    len - received_len);
			received_len += rv;
			break;
		}
	}

	ATF_REQUIRE_MSG(written == received_len,
	    "read %zu decrypted bytes, but wrote %zu", received_len, written);

	ATF_REQUIRE(memcmp(plaintext, received, len) == 0);

	free(outbuf);
	free(received);
	free(plaintext);

	close_sockets(sockets);
	ATF_REQUIRE(close(kq) == 0);
}

/* Cipher suites only valid for TLS 1.0. */
#define	TLS_10_TESTS(M) \
	M(aes128_cbc_1_0_sha1, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_ZERO) \
	M(aes256_cbc_1_0_sha1, CRYPTO_AES_CBC, 256 / 8, \
	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_ZERO)

/* TLS 1.3 AEAD cipher suites. */
#define	TLS_13_TESTS(M) \
	M(aes128_gcm_1_3, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0, \
	    TLS_MINOR_VER_THREE) \
	M(aes256_gcm_1_3, CRYPTO_AES_NIST_GCM_16, 256 / 8, 0, \
	    TLS_MINOR_VER_THREE) \
	M(chacha20_poly1305_1_3, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0, \
	    TLS_MINOR_VER_THREE)

/* AES-CBC MtE suites for TLS 1.1 and 1.2 (non-zero minor versions). */
#define	AES_CBC_NONZERO_TESTS(M) \
	M(aes128_cbc_1_1_sha1, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_ONE) \
	M(aes256_cbc_1_1_sha1, CRYPTO_AES_CBC, 256 / 8, \
	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_ONE) \
	M(aes128_cbc_1_2_sha1, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_TWO) \
	M(aes256_cbc_1_2_sha1, CRYPTO_AES_CBC, 256 / 8, \
	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_TWO) \
	M(aes128_cbc_1_2_sha256, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_TWO) \
	M(aes256_cbc_1_2_sha256, CRYPTO_AES_CBC, 256 / 8, \
	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_TWO) \
	M(aes128_cbc_1_2_sha384, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_TWO) \
	M(aes256_cbc_1_2_sha384, CRYPTO_AES_CBC, 256 / 8, \
	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_TWO) \

#define	AES_CBC_TESTS(M) \
	TLS_10_TESTS(M) \
	AES_CBC_NONZERO_TESTS(M)

#define	AES_GCM_TESTS(M) \
	M(aes128_gcm_1_2, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0, \
	    TLS_MINOR_VER_TWO) \
	M(aes256_gcm_1_2, CRYPTO_AES_NIST_GCM_16, 256 / 8, 0, \
	    TLS_MINOR_VER_TWO) \
	M(aes128_gcm_1_3, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0, \
	    TLS_MINOR_VER_THREE) \
	M(aes256_gcm_1_3, CRYPTO_AES_NIST_GCM_16, 256 / 8, 0, \
	    TLS_MINOR_VER_THREE)

#define	CHACHA20_TESTS(M) \
	M(chacha20_poly1305_1_2, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0, \
	    TLS_MINOR_VER_TWO) \
	M(chacha20_poly1305_1_3, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0, \
	    TLS_MINOR_VER_THREE)

#define GEN_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, name, len) \
ATF_TC_WITHOUT_HEAD(ktls_transmit_##cipher_name##_##name); \
ATF_TC_BODY(ktls_transmit_##cipher_name##_##name, tc) \
{ \
	struct tls_enable en; \
	uint64_t seqno; \
 \
	ATF_REQUIRE_KTLS(); \
	seqno = random(); \
	build_tls_enable(cipher_alg, key_size, auth_alg, minor, seqno, \
	    &en); \
	test_ktls_transmit_app_data(tc, &en, seqno, len); \
	free_tls_enable(&en); \
}

#define ADD_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, name) \
	ATF_TP_ADD_TC(tp, ktls_transmit_##cipher_name##_##name);

#define GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, name, type, len) \
ATF_TC_WITHOUT_HEAD(ktls_transmit_##cipher_name##_##name); \
ATF_TC_BODY(ktls_transmit_##cipher_name##_##name, tc) \
{ \
	struct tls_enable en; \
	uint64_t seqno; \
 \
	ATF_REQUIRE_KTLS(); \
	seqno = random(); \
	build_tls_enable(cipher_alg, key_size, auth_alg, minor, seqno, \
	    &en); \
	test_ktls_transmit_control(tc, &en, seqno, type, len); \
	free_tls_enable(&en); \
}

#define ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, name) \
	ATF_TP_ADD_TC(tp, ktls_transmit_##cipher_name##_##name);

#define GEN_TRANSMIT_EMPTY_FRAGMENT_TEST(cipher_name, cipher_alg, \
	    key_size, auth_alg, minor) \
ATF_TC_WITHOUT_HEAD(ktls_transmit_##cipher_name##_empty_fragment); \
ATF_TC_BODY(ktls_transmit_##cipher_name##_empty_fragment, tc) \
{ \
	struct tls_enable en; \
	uint64_t seqno; \
 \
	ATF_REQUIRE_KTLS(); \
	seqno = random(); \
	build_tls_enable(cipher_alg, key_size, auth_alg, minor, seqno, \
	    &en); \
	test_ktls_transmit_empty_fragment(tc, &en, seqno); \
	free_tls_enable(&en); \
}

#define ADD_TRANSMIT_EMPTY_FRAGMENT_TEST(cipher_name, cipher_alg, \
	    key_size, auth_alg, minor) \
	ATF_TP_ADD_TC(tp, ktls_transmit_##cipher_name##_empty_fragment);

#define GEN_TRANSMIT_TESTS(cipher_name, cipher_alg, key_size, auth_alg, \
	    minor) \
	GEN_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, short, 64) \
	GEN_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, long, 64 * 1024) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, control, 0x21 /* Alert */, 32)

#define ADD_TRANSMIT_TESTS(cipher_name, cipher_alg, key_size, auth_alg, \
	    minor) \
	ADD_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, short) \
	ADD_TRANSMIT_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, long) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, control)

/*
 * For each supported cipher suite, run three transmit tests:
 *
 * - a short test which sends 64 bytes of application data (likely as
 *   a single TLS record)
 *
 * - a long test which sends 64KB of application data (split across
 *   multiple TLS records)
 *
 * - a control test which sends a single record with a specific
 *   content type via sendmsg()
 */
AES_CBC_TESTS(GEN_TRANSMIT_TESTS);
AES_GCM_TESTS(GEN_TRANSMIT_TESTS);
CHACHA20_TESTS(GEN_TRANSMIT_TESTS);

#define GEN_TRANSMIT_PADDING_TESTS(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_1, 0x21 /* Alert */, 1) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_2, 0x21 /* Alert */, 2) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_3, 0x21 /* Alert */, 3) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_4, 0x21 /* Alert */, 4) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_5, 0x21 /* Alert */, 5) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_6, 0x21 /* Alert */, 6) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_7, 0x21 /* Alert */, 7) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_8, 0x21 /* Alert */, 8) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_9, 0x21 /* Alert */, 9) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_10, 0x21 /* Alert */, 10) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_11, 0x21 /* Alert */, 11) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_12, 0x21 /* Alert */, 12) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_13, 0x21 /* Alert */, 13) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_14, 0x21 /* Alert */, 14) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_15, 0x21 /* Alert */, 15) \
	GEN_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_16, 0x21 /* Alert */, 16)

#define ADD_TRANSMIT_PADDING_TESTS(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_1) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_2) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_3) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_4) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_5) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_6) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_7) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_8) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_9) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_10) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_11) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_12) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_13) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_14) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_15) \
	ADD_TRANSMIT_CONTROL_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_16)

/*
 * For AES-CBC MTE cipher suites using padding, add tests of messages
 * with each possible padding size.  Note that the padding_<N> tests
 * do not necessarily test <N> bytes of padding as the padding is a
 * function of the cipher suite's MAC length.  However, cycling
 * through all of the payload sizes from 1 to 16 should exercise all
 * of the possible padding lengths for each suite.
 */
AES_CBC_TESTS(GEN_TRANSMIT_PADDING_TESTS);

/*
 * Test "empty fragments" which are TLS records with no payload that
 * OpenSSL can send for TLS 1.0 connections.
 */
AES_CBC_TESTS(GEN_TRANSMIT_EMPTY_FRAGMENT_TEST);
AES_GCM_TESTS(GEN_TRANSMIT_EMPTY_FRAGMENT_TEST);
CHACHA20_TESTS(GEN_TRANSMIT_EMPTY_FRAGMENT_TEST);

/*
 * Verify that an invalid cipher suite is rejected with EINVAL when
 * enabling transmit offload.
 */
static void
test_ktls_invalid_transmit_cipher_suite(const atf_tc_t *tc,
    struct tls_enable *en)
{
	int sockets[2];

	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_TXTLS_ENABLE, en,
	    sizeof(*en)) == -1);
	ATF_REQUIRE(errno == EINVAL);

	close_sockets(sockets);
}

#define GEN_INVALID_TRANSMIT_TEST(name, cipher_alg, key_size, auth_alg, \
	    minor) \
ATF_TC_WITHOUT_HEAD(ktls_transmit_invalid_##name); \
ATF_TC_BODY(ktls_transmit_invalid_##name, tc) \
{ \
	struct tls_enable en; \
	uint64_t seqno; \
 \
	ATF_REQUIRE_KTLS(); \
	seqno = random(); \
	build_tls_enable(cipher_alg, key_size, auth_alg, minor, seqno, \
	    &en); \
	test_ktls_invalid_transmit_cipher_suite(tc, &en); \
	free_tls_enable(&en); \
}

#define ADD_INVALID_TRANSMIT_TEST(name, cipher_alg, key_size, auth_alg, \
	    minor) \
	ATF_TP_ADD_TC(tp, ktls_transmit_invalid_##name);

/* Cipher suite / TLS version combinations the kernel must reject. */
#define INVALID_CIPHER_SUITES(M) \
	M(aes128_cbc_1_0_sha256, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_ZERO) \
	M(aes128_cbc_1_0_sha384, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_ZERO) \
	M(aes128_gcm_1_0, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0, \
	    TLS_MINOR_VER_ZERO) \
	M(chacha20_poly1305_1_0, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0, \
	    TLS_MINOR_VER_ZERO) \
	M(aes128_cbc_1_1_sha256, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_ONE) \
	M(aes128_cbc_1_1_sha384, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_ONE) \
	M(aes128_gcm_1_1, CRYPTO_AES_NIST_GCM_16, 128 / 8, 0, \
	    TLS_MINOR_VER_ONE) \
	M(chacha20_poly1305_1_1, CRYPTO_CHACHA20_POLY1305, 256 / 8, 0, \
	    TLS_MINOR_VER_ONE) \
	M(aes128_cbc_1_3_sha1, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA1_HMAC, TLS_MINOR_VER_THREE) \
	M(aes128_cbc_1_3_sha256, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA2_256_HMAC, TLS_MINOR_VER_THREE) \
	M(aes128_cbc_1_3_sha384, CRYPTO_AES_CBC, 128 / 8, \
	    CRYPTO_SHA2_384_HMAC, TLS_MINOR_VER_THREE)

/*
 * Ensure that invalid cipher suites are rejected for transmit.
 */
INVALID_CIPHER_SUITES(GEN_INVALID_TRANSMIT_TEST);

#define GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, name, len, padding) \
ATF_TC_WITHOUT_HEAD(ktls_receive_##cipher_name##_##name); \
ATF_TC_BODY(ktls_receive_##cipher_name##_##name, tc) \
{ \
	struct tls_enable en; \
	uint64_t seqno; \
 \
	ATF_REQUIRE_KTLS(); \
	seqno = random(); \
	build_tls_enable(cipher_alg, key_size, auth_alg, minor, seqno, \
	    &en); \
	test_ktls_receive_app_data(tc, &en, seqno, len, padding); \
	free_tls_enable(&en); \
}

#define ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, name) \
	ATF_TP_ADD_TC(tp, ktls_receive_##cipher_name##_##name);

#define GEN_RECEIVE_TESTS(cipher_name, cipher_alg, key_size, auth_alg, \
	    minor) \
	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, short, 64, 0) \
	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, long, 64 * 1024, 0)

#define ADD_RECEIVE_TESTS(cipher_name, cipher_alg, key_size, auth_alg, \
	    minor) \
	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, short) \
	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, long)

/*
 * For each supported cipher suite, run two receive tests:
 *
 * - a short test which sends 64 bytes of application data (likely as
 *   a single TLS record)
 *
 * - a long test which sends 64KB of application data (split across
 *   multiple TLS records)
 */
AES_CBC_NONZERO_TESTS(GEN_RECEIVE_TESTS);
AES_GCM_TESTS(GEN_RECEIVE_TESTS);
CHACHA20_TESTS(GEN_RECEIVE_TESTS);

#define GEN_RECEIVE_MTE_PADDING_TESTS(cipher_name, cipher_alg, \
	    key_size, auth_alg, minor) \
	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_1, 1, 0) \
	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_2, 2, 0) \
	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_3, 3, 0) \
	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_4, 4, 0) \
	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_5, 5, 0) \
	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_6, 6, 0) \
	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_7, 7, 0) \
	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_8, 8, 0) \
	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_9, 9, 0) \
	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_10, 10, 0) \
	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_11, 11, 0) \
	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_12, 12, 0) \
	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_13, 13, 0) \
	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_14, 14, 0) \
	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_15, 15, 0) \
	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_16, 16, 0) \
	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_16_extra, 16, 16) \
	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_32_extra, 16, 32)

#define ADD_RECEIVE_MTE_PADDING_TESTS(cipher_name, cipher_alg, \
	    key_size, auth_alg, minor) \
	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_1) \
	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_2) \
	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_3) \
	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_4) \
	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_5) \
	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_6) \
	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_7) \
	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_8) \
	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_9) \
	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_10) \
	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_11) \
	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_12) \
	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_13) \
	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_14) \
	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_15) \
	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_16) \
	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_16_extra) \
	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, padding_32_extra)

/*
 * For AES-CBC MTE cipher suites using padding, add tests of messages
 * with each possible padding size.  Note that the padding_<N> tests
 * do not necessarily test <N> bytes of padding as the padding is a
 * function of the cipher suite's MAC length.  However, cycling
 * through all of the payload sizes from 1 to 16 should exercise all
 * of the possible padding lengths for each suite.
 *
 * Two additional tests check for additional padding with an extra
 * 16 or 32 bytes beyond the normal padding.
 */
AES_CBC_NONZERO_TESTS(GEN_RECEIVE_MTE_PADDING_TESTS);

#define GEN_RECEIVE_TLS13_PADDING_TESTS(cipher_name, cipher_alg, \
	    key_size, auth_alg, minor) \
	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, short_padded, 64, 16) \
	GEN_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, long_padded, 64 * 1024, 15)

#define ADD_RECEIVE_TLS13_PADDING_TESTS(cipher_name, cipher_alg, \
	    key_size, auth_alg, minor) \
	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, short_padded) \
	ADD_RECEIVE_APP_DATA_TEST(cipher_name, cipher_alg, key_size, \
	    auth_alg, minor, long_padded)

/*
 * For TLS 1.3 cipher suites, run two additional receive tests which
 * use add padding to each record.
 */
TLS_13_TESTS(GEN_RECEIVE_TLS13_PADDING_TESTS);

/*
 * Verify that an invalid cipher suite is rejected with EINVAL when
 * enabling receive offload.
 */
static void
test_ktls_invalid_receive_cipher_suite(const atf_tc_t *tc,
    struct tls_enable *en)
{
	int sockets[2];

	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
	    sizeof(*en)) == -1);
	ATF_REQUIRE(errno == EINVAL);

	close_sockets(sockets);
}

#define GEN_INVALID_RECEIVE_TEST(name, cipher_alg, key_size, auth_alg, \
	    minor) \
ATF_TC_WITHOUT_HEAD(ktls_receive_invalid_##name); \
ATF_TC_BODY(ktls_receive_invalid_##name, tc) \
{ \
	struct tls_enable en; \
	uint64_t seqno; \
 \
	ATF_REQUIRE_KTLS(); \
	seqno = random(); \
	build_tls_enable(cipher_alg, key_size, auth_alg, minor, seqno, \
	    &en); \
	test_ktls_invalid_receive_cipher_suite(tc, &en); \
	free_tls_enable(&en); \
}

#define ADD_INVALID_RECEIVE_TEST(name, cipher_alg, key_size, auth_alg, \
	    minor) \
	ATF_TP_ADD_TC(tp, ktls_receive_invalid_##name);

/*
 * Ensure that invalid cipher suites are rejected for receive.
 */
INVALID_CIPHER_SUITES(GEN_INVALID_RECEIVE_TEST);

/*
 * Verify that a cipher suite that is valid for transmit but not
 * supported for receive offload is rejected with EPROTONOSUPPORT.
 */
static void
test_ktls_unsupported_receive_cipher_suite(const atf_tc_t *tc,
    struct tls_enable *en)
{
	int sockets[2];

	ATF_REQUIRE_MSG(open_sockets(tc, sockets), "failed to create sockets");

	ATF_REQUIRE(setsockopt(sockets[1], IPPROTO_TCP, TCP_RXTLS_ENABLE, en,
	    sizeof(*en)) == -1);
	ATF_REQUIRE(errno == EPROTONOSUPPORT);

	close_sockets(sockets);
}

#define GEN_UNSUPPORTED_RECEIVE_TEST(name, cipher_alg, key_size, \
	    auth_alg, minor) \
ATF_TC_WITHOUT_HEAD(ktls_receive_unsupported_##name); \
ATF_TC_BODY(ktls_receive_unsupported_##name, tc) \
{ \
	struct tls_enable en; \
	uint64_t seqno; \
 \
	ATF_REQUIRE_KTLS(); \
	seqno = random(); \
	build_tls_enable(cipher_alg, key_size, auth_alg, minor, seqno, \
	    &en); \
	test_ktls_unsupported_receive_cipher_suite(tc, &en); \
	free_tls_enable(&en); \
}

#define ADD_UNSUPPORTED_RECEIVE_TEST(name, cipher_alg, key_size, \
	    auth_alg, minor) \
	ATF_TP_ADD_TC(tp, ktls_receive_unsupported_##name);

/*
 * Ensure that valid cipher suites not supported for receive are
 * rejected.
 */
TLS_10_TESTS(GEN_UNSUPPORTED_RECEIVE_TEST);

/*
 * Try to perform an invalid sendto(2) on a TXTLS-enabled socket, to exercise
 * KTLS error handling in the socket layer.
 */
ATF_TC_WITHOUT_HEAD(ktls_sendto_baddst);
ATF_TC_BODY(ktls_sendto_baddst, tc)
{
	char buf[32];
	struct sockaddr_in dst;
	struct tls_enable en;
	ssize_t n;
	int s;

	ATF_REQUIRE_KTLS();

	s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
	ATF_REQUIRE(s >= 0);

	build_tls_enable(CRYPTO_AES_NIST_GCM_16, 128 / 8, 0,
	    TLS_MINOR_VER_THREE, (uint64_t)random(), &en);

	ATF_REQUIRE(setsockopt(s, IPPROTO_TCP, TCP_TXTLS_ENABLE, &en,
	    sizeof(en)) == 0);

	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_len = sizeof(dst);
	dst.sin_addr.s_addr = htonl(INADDR_BROADCAST);
	dst.sin_port = htons(12345);

	memset(buf, 0, sizeof(buf));
	n = sendto(s, buf, sizeof(buf), 0, (struct sockaddr *)&dst,
	    sizeof(dst));

	/* Can't transmit to the broadcast address over TCP. */
	ATF_REQUIRE_ERRNO(EACCES, n == -1);
	ATF_REQUIRE(close(s) == 0);
}

ATF_TP_ADD_TCS(tp)
{
	/* Transmit tests */
	AES_CBC_TESTS(ADD_TRANSMIT_TESTS);
	AES_GCM_TESTS(ADD_TRANSMIT_TESTS);
	CHACHA20_TESTS(ADD_TRANSMIT_TESTS);
	AES_CBC_TESTS(ADD_TRANSMIT_PADDING_TESTS);
	AES_CBC_TESTS(ADD_TRANSMIT_EMPTY_FRAGMENT_TEST);
	AES_GCM_TESTS(ADD_TRANSMIT_EMPTY_FRAGMENT_TEST);
	CHACHA20_TESTS(ADD_TRANSMIT_EMPTY_FRAGMENT_TEST);
	INVALID_CIPHER_SUITES(ADD_INVALID_TRANSMIT_TEST);

	/* Receive tests */
	TLS_10_TESTS(ADD_UNSUPPORTED_RECEIVE_TEST);
	AES_CBC_NONZERO_TESTS(ADD_RECEIVE_TESTS);
	AES_GCM_TESTS(ADD_RECEIVE_TESTS);
	CHACHA20_TESTS(ADD_RECEIVE_TESTS);
	AES_CBC_NONZERO_TESTS(ADD_RECEIVE_MTE_PADDING_TESTS);
	TLS_13_TESTS(ADD_RECEIVE_TLS13_PADDING_TESTS);
	INVALID_CIPHER_SUITES(ADD_INVALID_RECEIVE_TEST);

	/* Miscellaneous */
	ATF_TP_ADD_TC(tp, ktls_sendto_baddst);

	return (atf_no_error());
}